import json
import multiprocessing
import pickle
import tempfile
import numpy as np
import tvm
import tvm.testing
from tvm import auto_scheduler, te, topi
from tvm.auto_scheduler import workload_registry
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
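# record_common round-trips a (MeasureInput, MeasureResult) pair through both the
# string-based and the file-based record serialization paths, then checks that the
# recovered schedule state matches the original after bound inference.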
def record_common(dag, s):
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
inp = auto_scheduler.measure.MeasureInput(task, s)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
record_str = auto_scheduler.measure_record.dump_record_to_string(inp, res)
r_inp, r_res = auto_scheduler.measure_record.load_record_from_string(record_str)
assert inp.task.workload_key == r_inp.task.workload_key
assert str(res) == str(r_res)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
s1 = dag.infer_bound_from_state(s)
s2 = dag.infer_bound_from_state(inputs[0].state)
assert s1 == s2
assert not (s1 == dag.get_init_state())
def test_record_split_reorder_fuse_annotation():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
its0 = s.split(C, s[C].iters[0], [4, 8, 8])
its1 = s.split(C, s[C].iters[4], [8, 4, 4])
s.reorder(
C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], s[C].iters[8], its1[3]]
)
s.fuse(C, [s[C].iters[0], s[C].iters[1], s[C].iters[2]])
s.parallel(C, s[C].iters[0])
s.bind(C, s[C].iters[1], "blockIdx.x")
s.bind(C, s[C].iters[2], "threadIdx.z")
s.bind(C, s[C].iters[3], "vthread")
s.unroll(C, s[C].iters[4])
s.vectorize(C, s[C].iters[6])
record_common(dag, s)
def test_record_compute_at_root_inline_cache_read_write():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
AA = topi.nn.relu(A)
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(AA[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
C_shared = s.cache_write(C, "shared")
s.compute_at(C_shared, C, s[C].iters[0])
B_global = s.cache_read(B, "global", [C_shared])
s.compute_at(B_global, C_shared, s[C_shared].iters[2])
s.compute_inline(AA)
s.compute_root(C_shared)
record_common(dag, s)
def test_record_follow_split_follow_fused_split():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
dag = auto_scheduler.ComputeDAG([A, B, E])
s = dag.get_init_state()
s.split(C, s[C].iters[0], [4, 2, 8, 4], True)
split_step0 = len(s.transform_steps) - 1
s.follow_split(C, s[C].iters[5], split_step0, 4)
its0 = s.split(E, s[E].iters[0], [4, 2, 8, 4], True)
split_step1 = len(s.transform_steps) - 1
its1 = s.split(E, s[E].iters[5], [2, 4, 2, 4], True)
split_step2 = len(s.transform_steps) - 1
its = []
for i0, i1 in zip(its0, its1):
its.append(i0)
its.append(i1)
for i in range(0, 5):
s.fuse(E, [s[E].iters[i], s[E].iters[i + 1]])
s.follow_fused_split(D, s[D].iters[0], [split_step1, split_step2], 2, True)
record_common(dag, s)
def test_record_pragma_storage_align_rfactor():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
ko, _ = s.split(C, s[C].iters[2], [16])
s.rfactor(C, ko, 2)
s.pragma(C, s[C].iters[0], "auto_unroll_max_step$64")
s.storage_align(C, s[C].iters[-1], 8, 4)
record_common(dag, s)
def test_recover_measure_input():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(correct_inp.task.compute_dag) == str(inp.task.compute_dag)
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp, rebuild_state=True)
assert str(correct_inp.state) == str(inp.state)
def test_workload_dis_factor():
calc = auto_scheduler.utils.calc_workload_dis_factor
decode = auto_scheduler.utils.decode_workload_key
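# The distance factor is the product of the per-dimension shape ratios when every
# target dimension is a multiple of the candidate's; any mismatch in non-shape
# attributes, dtype, or function name yields infinity. For example, target
# [8, 3, 224, 224] vs. candidate [1, 3, 112, 112] gives (8/1) * (224/112) * (224/112) = 32.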
target_wkl_key = json.dumps(
["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"]
)
assert calc(decode(target_wkl_key), decode(target_wkl_key)) == 1
wkl_key = json.dumps(["func1", [1, 3, 112, 112], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == 8 * 2 * 2
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [1, 1], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [0, 0], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "int8"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
wkl_key = json.dumps(["func2", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
wkl_key = json.dumps(["func1", [8, 3, 223, 223], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
def test_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_dag_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
tensors = [A, B, E]
dag = auto_scheduler.ComputeDAG(tensors)
key = workload_registry.register_workload_tensors(dag.workload_key(), tensors)
transfer_data = workload_registry.serialize_workload_registry_entry(key)
f_data = pickle.dumps(transfer_data)
f_new = pickle.loads(f_data)
del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
workload_registry.deserialize_workload_registry_entry(f_new)
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key=key, target=target)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_workload_serialization():
key = tvm.auto_scheduler.utils.get_func_name(matmul_auto_scheduler_test)
transfer_data = workload_registry.serialize_workload_registry_entry(key)
f_data = pickle.dumps(transfer_data)
f_new = pickle.loads(f_data)
del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
workload_registry.deserialize_workload_registry_entry(f_new)
def test_measure_local_builder_rpc_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
del measure_ctx
def measure_local_builder_rpc_runner_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_measure_local_builder_rpc_runner()
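# Re-run the RPC runner test inside a fresh "spawn" process to verify that
# measurement does not depend on state inherited via fork.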
@tvm.testing.requires_llvm
def test_measure_local_builder_rpc_runner_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=measure_local_builder_rpc_runner_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_measure_target_host():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target=tvm.target.Target("llvm", "llvm -mtriple=aarch64-linux-gnu"),
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
recovered_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(recovered_inp.task.target.host) == str(inp.task.target.host)
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_local_runner():
@auto_scheduler.register_workload
def foo():
X = te.placeholder(shape=[10], dtype="int32")
Index = te.placeholder(shape=[1], dtype="int32", name="Index")
Y = te.compute((1,), lambda i: X[Index[i]])
return [X, Index, Y]
task = auto_scheduler.SearchTask(
func=foo,
target="llvm",
task_inputs={
"Index": tvm.nd.array(np.array([5], dtype="int32")),
},
)
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(timeout=10)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_rpc_runner():
@auto_scheduler.register_workload
def foo():
X = te.placeholder(shape=[10], dtype="int32")
Index = te.placeholder(shape=[1], dtype="int32", name="Index")
Y = te.compute((1,), lambda i: X[Index[i]])
return [X, Index, Y]
task = auto_scheduler.SearchTask(
func=foo,
target="llvm",
task_inputs={
"Index": tvm.nd.array(np.array([5], dtype="int32")),
},
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
if __name__ == "__main__":
tvm.testing.main()
"""Test search policy"""
import random
import multiprocessing
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
class CustomMeasureCallback(auto_scheduler.measure.PythonBasedMeasureCallback):
"""A simple Python-based callback for testing."""
def callback(self, policy, inputs, results):
assert isinstance(policy, auto_scheduler.search_policy.SearchPolicy)
for inp, res in zip(inputs, results):
assert isinstance(inp, auto_scheduler.MeasureInput)
assert isinstance(res, auto_scheduler.MeasureResult)
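# search_common drives one end-to-end tuning round: it tunes a small task with the
# requested policy, applies the best record, and checks numerical correctness
# against a reference build of the unscheduled compute DAG.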
def search_common(
task=None,
target="llvm",
search_policy="sketch",
runner="local",
num_measure_trials=100,
cost_model=auto_scheduler.RandomModel(),
init_search_callbacks=None,
):
if task is None:
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(64, 64, 64), target=target
)
target = task.target
print("Test search policy '%s' for '%s'" % (search_policy, target))
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
init_search_callbacks = init_search_callbacks or []
init_search_callbacks.append(auto_scheduler.PreloadMeasuredStates(log_file))
if search_policy == "empty":
search_policy = auto_scheduler.EmptyPolicy(task)
elif search_policy == "sketch":
search_policy = auto_scheduler.SketchPolicy(
task, program_cost_model=cost_model, init_search_callbacks=init_search_callbacks
)
else:
raise ValueError("Invalid policy: " + search_policy)
tuning_options = auto_scheduler.TuningOptions(
num_measure_trials=num_measure_trials,
num_measures_per_round=2,
early_stopping=1,
runner=runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file), CustomMeasureCallback()],
)
task.tune(tuning_options=tuning_options, search_policy=search_policy)
sch, args = task.apply_best(log_file)
mod = tvm.build(sch, args, target)
sch, args = task.compute_dag.apply_steps_from_state(task.compute_dag.init_state)
mod_ref = tvm.build(sch, args, "llvm")
ctx = tvm.device(str(target), 0)
np_arrays = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype) for x in args]
tvm_arrays = [tvm.nd.array(x, ctx) for x in np_arrays]
mod(*tvm_arrays)
actual = [x.numpy() for x in tvm_arrays]
tvm_arrays = [tvm.nd.array(x) for x in np_arrays]
mod_ref(*tvm_arrays)
expected = [x.numpy() for x in tvm_arrays]
for x, y in zip(actual, expected):
tvm.testing.assert_allclose(x, y, rtol=1e-5)
@tvm.testing.requires_llvm
def test_workload_registry_empty_policy():
search_common(search_policy="empty", num_measure_trials=2)
N = 64
target = "llvm"
search_common(
task=auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test", args=(N, N, N), target=target
),
num_measure_trials=2,
search_policy="empty",
)
search_common(
task=auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test_rename_1", args=(N, N, N), target=target
),
num_measure_trials=2,
search_policy="empty",
)
@tvm.testing.requires_llvm
def test_sketch_search_policy_basic():
search_common()
def sketch_search_policy_basic_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_sketch_search_policy_basic()
@tvm.testing.requires_llvm
def test_sketch_search_policy_basic_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=sketch_search_policy_basic_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_sketch_search_policy_xgbmodel():
search_common(cost_model=auto_scheduler.XGBModel())
@tvm.testing.requires_cuda
def test_sketch_search_policy_cuda_rpc_runner():
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
search_common(target="cuda", runner=measure_ctx.runner)
@tvm.testing.requires_cuda
def test_sketch_search_policy_cuda_xgbmodel_rpc_runner():
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
search_common(target="cuda", runner=measure_ctx.runner, cost_model=auto_scheduler.XGBModel())
@tvm.testing.requires_llvm
@tvm.testing.requires_cuda
def test_sketch_search_policy_zero_rank():
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
for target in ["llvm", "cuda"]:
task = auto_scheduler.SearchTask(
func=zero_rank_compute_auto_scheduler_test, args=(10,), target=target
)
search_common(task, runner=measure_ctx.runner)
task = auto_scheduler.SearchTask(
func=zero_rank_reduce_auto_scheduler_test, args=(10,), target=target
)
search_common(task, runner=measure_ctx.runner)
@tvm.testing.requires_llvm
def test_sketch_search_policy_custom_sketch():
def meet_condition_func(search_policy, state, stage_id):
return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
def apply_func(search_policy, state, stage_id):
ret = []
state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
C = state.stage_ops[2]
ret.append([state.state_object, -1])
s1 = state.copy()
i, _, _ = s1[C].iters
s1.split(C, i, [8])
ret.append([s1.state_object, -1])
return ret
search_common(
cost_model=auto_scheduler.XGBModel(),
init_search_callbacks=[
auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func)
],
)
if __name__ == "__main__":
test_workload_registry_empty_policy()
test_sketch_search_policy_basic()
test_sketch_search_policy_basic_spawn()
test_sketch_search_policy_xgbmodel()
test_sketch_search_policy_cuda_rpc_runner()
test_sketch_search_policy_cuda_xgbmodel_rpc_runner()
test_sketch_search_policy_zero_rank()
test_sketch_search_policy_custom_sketch()
"""Test search task"""
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
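# Each test clears TASK_INPUT_BUFFER_TABLE first so that task-input registration
# state does not leak between test cases.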
def test_search_task_add_task_input():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
N = 64
target = "llvm"
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
test_input_1 = tvm.runtime.ndarray.empty((10, 20))
test_input_2 = tvm.runtime.ndarray.empty((30, 40, 50))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
"test_input_2": test_input_2,
},
task_inputs_overwrite=True,
)
assert len(task.task_input_names) == 3
assert task.task_input_names[0] == "test_input_0"
assert task.task_input_names[1] == "test_input_1"
assert task.task_input_names[2] == "test_input_2"
def test_search_task_record():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
N = 64
target = "llvm"
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test", args=(N, N, N), target=target
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={"test_input_0": test_input_0},
task_inputs_overwrite=True,
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 1
assert new_task.task_input_names[0] == "test_input_0"
test_input_1 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
},
task_inputs_overwrite=True,
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 2
assert new_task.task_input_names[0] == "test_input_0"
assert new_task.task_input_names[1] == "test_input_1"
v5_log = """["[\\\"matmul_auto_scheduler_test\\\", 64, 64, 64]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1]"""
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(v5_log)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 0
def test_recover_measure_input_with_task_input():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
task_inputs={
"test_input_0": test_input_0,
},
task_inputs_overwrite=True,
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 1
assert new_task.task_input_names[0] == "test_input_0"
test_input_1 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
},
task_inputs_overwrite=True,
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 2
assert new_task.task_input_names[0] == "test_input_0"
assert new_task.task_input_names[1] == "test_input_1"
v5_log = """{"i": [["[\\\"matmul_auto_scheduler_test\\\", 512, 512, 512]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1], [[], []]], "r": [[0.1], 0, 0.2, 1], "v": "v0.6"}"""
measure_log = auto_scheduler.measure_record.load_record_from_string(v5_log)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 0
if __name__ == "__main__":
test_search_task_add_task_input()
test_search_task_record()
test_recover_measure_input_with_task_input()
""" Test sketch generation. """
import sys
import tvm
import tvm.testing
import pytest
from tvm import te, auto_scheduler
from tvm.auto_scheduler import _ffi_api
from tvm.auto_scheduler.loop_state import Stage
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
double_matmul_auto_scheduler_test,
conv2d_nchw_bn_relu_auto_scheduler_test,
max_pool2d_auto_scheduler_test,
min_nm_auto_scheduler_test,
softmax_nm_auto_scheduler_test,
softmax_abcd_auto_scheduler_test,
conv2d_winograd_nhwc_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
def generate_sketches(
workload_func, args, target, print_for_debug=False, init_search_callbacks=None
):
task = auto_scheduler.SearchTask(
func=workload_func,
args=args,
target=target,
hardware_params=auto_scheduler.HardwareParams(num_cores=4, target=target),
)
policy = auto_scheduler.SketchPolicy(
task, verbose=0, init_search_callbacks=init_search_callbacks
)
return policy.generate_sketches(print_for_debug)
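# The assert_* helpers below wrap private _ffi_api utilities to check structural
# properties of a generated sketch: tiling, compute_at kind, cache read/write
# stages, rfactor, and cross-thread reduction.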
def assert_compute_at_condition(stage, condition):
assert stage.compute_at == Stage.COMPUTE_AT_TRANS_TABLE[condition]
def assert_is_tiled(stage):
assert _ffi_api.SearchPolicyUtilsIsTiled(stage)
def assert_is_not_tiled(stage):
assert not _ffi_api.SearchPolicyUtilsIsTiled(stage)
def assert_has_cache_write(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasCacheWriteStage(state, stage_id)
def assert_has_cache_read(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasCacheReadStage(state, stage_id)
def assert_has_rfactor(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasRfactorStage(state, stage_id)
def assert_has_cross_thread_reduction(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasCrossThreadReduction(state, stage_id)
def test_cpu_matmul_sketch():
sketches = generate_sketches(matmul_auto_scheduler_test, (512, 512, 512), "llvm")
""" 3 multi-level tiling sketches
No.0 : Multi-level tiling
No.1 : Multi-level tiling with cache write on position 0
No.2 : Multi-level tiling with cache write on position 1
"""
assert len(sketches) == 3
assert_is_tiled(sketches[0].stages[2])
assert_is_tiled(sketches[1].stages[2])
assert_has_cache_write(sketches[1], 2)
assert_compute_at_condition(sketches[1].stages[2], "iter")
assert_is_tiled(sketches[2].stages[2])
assert_has_cache_write(sketches[2], 2)
assert_compute_at_condition(sketches[2].stages[2], "iter")
assert sketches[1] != sketches[2]
sketches = generate_sketches(matmul_auto_scheduler_test, (8, 8, 512), "llvm")
""" 2 rfactor sketches + 3 multi-level tiling sketches
No.0 : Rfactor with factor position 0
No.1 : Rfactor with factor position 1
No.2 : Multi-level tiling
No.3 : Multi-level tiling with cache write on position 0
No.4 : Multi-level tiling with cache write on position 1
"""
assert len(sketches) == 5
assert_has_rfactor(sketches[0], 2)
assert_has_rfactor(sketches[1], 2)
assert sketches[0] != sketches[1]
assert_is_tiled(sketches[2].stages[2])
assert_is_tiled(sketches[3].stages[2])
assert_has_cache_write(sketches[3], 2)
assert_compute_at_condition(sketches[3].stages[2], "iter")
assert_is_tiled(sketches[4].stages[2])
assert_has_cache_write(sketches[4], 2)
assert_compute_at_condition(sketches[4].stages[2], "iter")
assert sketches[3] != sketches[4]
sketches = generate_sketches(double_matmul_auto_scheduler_test, (512,), "llvm")
""" 3 multi-level tiling sketches for one matmul, so 3 * 3 = 9 sketches in total """
assert len(sketches) == 9
assert_is_tiled(sketches[8].stages[5])
def test_cpu_conv2d_bn_relu_sketch():
sketches = generate_sketches(
conv2d_nchw_bn_relu_auto_scheduler_test, (1, 56, 56, 512, 512, 3, 1, 1), "llvm"
)
""" 3 multi-level tiling sketches
No.0 : Conv2d multi-level tiling with fusion on position 0
No.1 : Conv2d multi-level tiling with fusion on position 1
No.2 : Conv2d multi-level tiling without fusion
"""
assert len(sketches) == 3
assert_is_not_tiled(sketches[0].stages[1])
assert_is_tiled(sketches[0].stages[3])
assert_compute_at_condition(sketches[0].stages[3], "iter")
assert_compute_at_condition(sketches[0].stages[5], "inlined")
assert_compute_at_condition(sketches[0].stages[7], "inlined")
assert_compute_at_condition(sketches[0].stages[9], "inlined")
assert_is_tiled(sketches[0].stages[10])
assert_is_not_tiled(sketches[1].stages[1])
assert_is_tiled(sketches[1].stages[3])
assert_compute_at_condition(sketches[1].stages[3], "iter")
assert_compute_at_condition(sketches[1].stages[5], "inlined")
assert_compute_at_condition(sketches[1].stages[7], "inlined")
assert_compute_at_condition(sketches[1].stages[9], "inlined")
assert_is_tiled(sketches[1].stages[10])
assert_is_not_tiled(sketches[2].stages[1])
assert_is_tiled(sketches[2].stages[3])
assert_compute_at_condition(sketches[2].stages[3], "root")
assert_compute_at_condition(sketches[2].stages[5], "inlined")
assert_compute_at_condition(sketches[2].stages[7], "inlined")
assert_compute_at_condition(sketches[2].stages[9], "inlined")
assert_is_not_tiled(sketches[2].stages[10])
def test_cpu_max_pool2d_sketch():
sketches = generate_sketches(max_pool2d_auto_scheduler_test, (1, 56, 56, 512, 1), "llvm")
""" 1 default sketch """
assert len(sketches) == 1
assert len(sketches[0].transform_steps) == 0
def test_cpu_min_sketch():
sketches = generate_sketches(min_nm_auto_scheduler_test, (10, 1024), "llvm")
""" 2 rfactor sketches + 1 default sketch
No.0 : Rfactor with factor position 0
No.1 : Rfactor with factor position 1
No.2 : Default sketch
"""
assert len(sketches) == 3
assert_has_rfactor(sketches[0], 1)
assert_has_rfactor(sketches[1], 1)
assert sketches[0] != sketches[1]
assert len(sketches[2].transform_steps) == 0
def test_cpu_softmax_sketch():
sketches = generate_sketches(softmax_nm_auto_scheduler_test, (1, 1024), "llvm")
""" (2 rfactor sketches + 1 default sketch) * (2 rfactor sketch |
es + 1 default sketch) """
assert len(sketches) == (3 * 3)
for i in range(0, 3):
for j in range(0, 3):
sketch = sketches[i * 3 + j]
if j in [0, 1]:
assert_has_rfactor(sketch, 1)
if i in [0, 1]:
assert_has_rfactor(sketch, 4 if j in [0, 1] else 3)
assert len(sketches[8].transform_steps) == 0
sketches = generate_sketches(softmax_abcd_auto_scheduler_test, (1, 12, 128, 128), "llvm")
""" (2 rfactor sketches + 1 default sketch) * (2 rfactor sketches + 1 default sketch) """
assert len(sketches) == (3 * 3)
for i in range(0, 3):
for j in range(0, 3):
sketch = sketches[i * 3 + j]
if j in [0, 1]:
assert_has_rfactor(sketch, 1)
if i in [0, 1]:
assert_has_rfactor(sketch, 4 if j in [0, 1] else 3)
assert len(sketches[8].transform_steps) == 0
def test_cpu_conv2d_winograd_sketch():
sketches = generate_sketches(
conv2d_winograd_nhwc_auto_scheduler_test, (1, 28, 28, 128, 128, 3, 1, 1), "llvm"
)
""" 3 multi-level tiling sketches
No.0 : Bgemm multi-level tiling
No.1 : Bgemm multi-level tiling with cache write on position 0
No.2 : Bgemm multi-level tiling with cache write on position 1
"""
assert len(sketches) == 3
assert_is_not_tiled(sketches[0].stages[1])
assert_is_not_tiled(sketches[0].stages[2])
assert_compute_at_condition(sketches[0].stages[3], "inlined")
assert_is_tiled(sketches[0].stages[4])
assert_is_tiled(sketches[0].stages[6])
assert_compute_at_condition(sketches[0].stages[7], "inlined")
assert_is_tiled(sketches[0].stages[8])
assert_is_not_tiled(sketches[0].stages[9])
assert_is_not_tiled(sketches[1].stages[1])
assert_is_not_tiled(sketches[1].stages[2])
assert_compute_at_condition(sketches[1].stages[3], "inlined")
assert_is_tiled(sketches[1].stages[4])
assert_is_tiled(sketches[1].stages[6])
assert_has_cache_write(sketches[1], 6)
assert_compute_at_condition(sketches[1].stages[6], "iter")
assert_compute_at_condition(sketches[1].stages[8], "inlined")
assert_is_tiled(sketches[1].stages[9])
assert_is_not_tiled(sketches[1].stages[10])
assert_is_not_tiled(sketches[2].stages[1])
assert_is_not_tiled(sketches[2].stages[2])
assert_compute_at_condition(sketches[2].stages[3], "inlined")
assert_is_tiled(sketches[2].stages[4])
assert_is_tiled(sketches[2].stages[6])
assert_has_cache_write(sketches[2], 6)
assert_compute_at_condition(sketches[2].stages[6], "iter")
assert_compute_at_condition(sketches[2].stages[8], "inlined")
assert_is_tiled(sketches[2].stages[9])
assert_is_not_tiled(sketches[2].stages[10])
assert sketches[1] != sketches[2]
def test_cpu_zero_rank_sketch():
sketches = generate_sketches(zero_rank_reduce_auto_scheduler_test, (128,), "llvm")
""" 2 rfactor sketches + 1 multi-level tiling sketches """
assert len(sketches) == 3
def test_cpu_custom_sketch():
def meet_condition_func(search_policy, state, stage_id):
return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
def apply_func(search_policy, state, stage_id):
ret = []
state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
C = state.stage_ops[2]
ret.append([state.state_object, -1])
s1 = state.copy()
i, _, _ = s1[C].iters
s1.split(C, i, [8, 2])
ret.append([s1.state_object, -1])
return ret
sketches = generate_sketches(
matmul_auto_scheduler_test,
(512, 512, 512),
"llvm",
init_search_callbacks=[
auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func)
],
)
assert len(sketches) == 2
assert sketches[0].stages[2].iters[0].range.extent == 512
assert sketches[0].stages[2].iters[1].range.extent == 512
assert sketches[0].stages[2].iters[2].range.extent == 512
assert sketches[1].stages[2].iters[0].range.extent == 32
assert sketches[1].stages[2].iters[1].range.extent == 8
assert sketches[1].stages[2].iters[2].range.extent == 2
assert sketches[1].stages[2].iters[3].range.extent == 512
assert sketches[1].stages[2].iters[4].range.extent == 512
@tvm.testing.requires_cuda
def test_cuda_matmul_sketch():
sketches = generate_sketches(matmul_auto_scheduler_test, (512, 512, 512), "cuda")
""" 1 multi-level tiling sketch """
assert len(sketches) == 1
assert_has_cache_read(sketches[0], 0)
assert_compute_at_condition(sketches[0].stages[1], "iter")
assert_has_cache_read(sketches[0], 2)
assert_compute_at_condition(sketches[0].stages[3], "iter")
assert_has_cache_write(sketches[0], 4)
assert_is_tiled(sketches[0].stages[4])
assert_compute_at_condition(sketches[0].stages[4], "iter")
assert_is_tiled(sketches[0].stages[5])
sketches = generate_sketches(matmul_auto_scheduler_test, (8, 8, 1024), "cuda")
""" 1 cross thread reuction sketch + 1 multi-level tiling sketch """
assert len(sketches) == 2
assert_has_cross_thread_reduction(sketches[0], 2)
assert_has_cache_read(sketches[1], 0)
assert_compute_at_condition(sketches[1].stages[1], "iter")
assert_has_cache_read(sketches[1], 2)
assert_compute_at_condition(sketches[1].stages[3], "iter")
assert_has_cache_write(sketches[1], 4)
assert_is_tiled(sketches[1].stages[4])
assert_compute_at_condition(sketches[1].stages[4], "iter")
assert_is_tiled(sketches[1].stages[5])
sketches = generate_sketches(double_matmul_auto_scheduler_test, (512,), "cuda")
""" 1 multi-level tiling sketch for one matmul, so 1 x 1 = 1 sketch in total """
assert len(sketches) == 1
assert_compute_at_condition(sketches[0].stages[5], "root")
assert_compute_at_condition(sketches[0].stages[6], "iter")
@tvm.testing.requires_cuda
def test_cuda_conv2d_bn_relu_sketch():
sketches = generate_sketches(
conv2d_nchw_bn_relu_auto_scheduler_test, (1, 56, 56, 512, 512, 3, 1, 1), "cuda |
"
)
""" 1 multi-level tiling sketch """
assert len(sketches) == 1
assert_has_cache_read(sketches[0], 1)
assert_compute_at_condition(sketches[0].stages[1], "inlined")
assert_compute_at_condition(sketches[0].stages[2], "iter")
assert_has_cache_read(sketches[0], 3)
assert_compute_at_condition(sketches[0].stages[4], "iter")
assert_is_tiled(sketches[0].stages[5])
assert_compute_at_condition(sketches[0].stages[5], "iter")
assert_compute_at_condition(sketches[0].stages[7], "inlined")
assert_compute_at_condition(sketches[0].stages[9], "inlined")
assert_compute_at_condition(sketches[0].stages[11], "inlined")
assert_is_tiled(sketches[0].stages[12])
@tvm.testing.requires_cuda
def test_cuda_max_pool2d_sketch():
sketches = generate_sketches(max_pool2d_auto_scheduler_test, (1, 56, 56, 512, 0), "cuda")
""" 1 default sketch """
assert len(sketches) == 1
assert len(sketches[0].transform_steps) == 0
@tvm.testing.requires_cuda
def test_cuda_min_sketch():
sketches = generate_sketches(min_nm_auto_scheduler_test, (10, 1024), "cuda")
""" 1 cross thread reuction sketch + 1 default sketch """
assert len(sketches) == 2
assert_has_cross_thread_reduction(sketches[0], 1)
assert len(sketches[1].transform_steps) == 0
@tvm.testing.requires_cuda
def test_cuda_softmax_sketch():
sketches = generate_sketches(softmax_nm_auto_scheduler_test, (2, 1024), "cuda")
""" (1 cross thread reuction sketch + 1 default sketch) * (1 cross thread reuction sketch + 1 default sketch) """
assert len(sketches) == (2 * 2)
assert_has_cross_thread_reduction(sketches[0], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[0], 3)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[1], 3)
assert_has_cross_thread_reduction(sketches[2], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_compute_at_condition(sketches[3].stages[2], "inlined")
sketches = generate_sketches(softmax_abcd_auto_scheduler_test, (1, 12, 128, 128), "cuda")
""" (1 cross thread reuction sketch + 1 default sketch) * (1 cross thread reuction sketch + 1 default sketch) """
assert len(sketches) == (2 * 2)
assert_has_cross_thread_reduction(sketches[0], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[0], 3)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[1], 3)
assert_has_cross_thread_reduction(sketches[2], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_compute_at_condition(sketches[3].stages[2], "inlined")
@tvm.testing.requires_cuda
def test_cuda_conv2d_winograd_sketch():
sketches = generate_sketches(
conv2d_winograd_nhwc_auto_scheduler_test, (1, 28, 28, 128, 128, 3, 1, 1), "cuda"
)
""" 1 multi-level tiling sketch """
assert len(sketches) == 1
assert_compute_at_condition(sketches[0].stages[1], "inlined")
assert_compute_at_condition(sketches[0].stages[2], "iter")
assert_compute_at_condition(sketches[0].stages[3], "inlined")
assert_is_tiled(sketches[0].stages[4])
assert_has_cache_read(sketches[0], 4)
assert_compute_at_condition(sketches[0].stages[5], "iter")
assert_has_cache_read(sketches[0], 6)
assert_compute_at_condition(sketches[0].stages[7], "iter")
assert_is_tiled(sketches[0].stages[8])
assert_compute_at_condition(sketches[0].stages[8], "iter")
assert_has_cache_write(sketches[0], 8)
assert_compute_at_condition(sketches[0].stages[9], "root")
assert_is_tiled(sketches[0].stages[11])
assert_is_not_tiled(sketches[0].stages[12])
@tvm.testing.requires_cuda
def test_cuda_zero_rank_sketch():
sketches = generate_sketches(zero_rank_reduce_auto_scheduler_test, (128,), "cuda")
""" 1 cross thread reuction sketch + 1 multi-level tiling sket |
ch """
assert len(sketches) == 2
if __name__ == "__main__":
tvm.testing.main()
""" Test task scheduler """
import tempfile
import multiprocessing
import numpy as np
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
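# These tests check that measurement trials are distributed across tasks according
# to the chosen strategy: evenly for "round-robin", and weighted by an objective
# gradient for the default strategy.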
@tvm.testing.requires_llvm
def test_task_scheduler_round_robin():
tasks = []
for n in [2, 4, 8]:
tasks.append(
auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(n, n, n), target="llvm"
)
)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
num_trials_per_task = 2
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=num_trials_per_task * len(tasks),
runner=measure_ctx.runner,
num_measures_per_round=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task_scheduler = auto_scheduler.TaskScheduler(tasks, strategy="round-robin", callbacks=[])
task_scheduler.tune(tune_option, search_policy="sketch.random")
counters = {}
for task in tasks:
counters[task.workload_key] = 0
for inp, _ in auto_scheduler.load_records(log_file):
counters[inp.task.workload_key] += 1
for task in tasks:
assert counters[task.workload_key] == num_trials_per_task
task_scheduler = auto_scheduler.TaskScheduler(
tasks, strategy="round-robin", load_log_file=log_file, callbacks=[]
)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=len(tasks),
num_measures_per_round=1,
)
task_scheduler.tune(tune_option, search_policy="sketch.random")
del measure_ctx
@tvm.testing.requires_llvm
def task_scheduler_round_robin_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_task_scheduler_round_robin()
@tvm.testing.requires_llvm
def test_task_scheduler_round_robin_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=task_scheduler_round_robin_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_task_scheduler_gradient():
tasks = []
for n in [2, 4]:
tasks.append(
auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(n, n, n), target="llvm"
)
)
def objective_func(costs):
return 1e5 * costs[0]
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
n_trials = 5
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=n_trials,
runner=measure_ctx.runner,
num_measures_per_round=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task_scheduler = auto_scheduler.TaskScheduler(
tasks, objective_func=objective_func, callbacks=[]
)
task_scheduler.best_costs = np.array([1e2, 1e-8])
task_scheduler.tune(tune_option, search_policy="sketch.random")
counters = {}
for task in tasks:
counters[task.workload_key] = 0
for inp, _ in auto_scheduler.load_records(log_file):
counters[inp.task.workload_key] += 1
assert counters[tasks[0].workload_key] == n_trials - 1
assert counters[tasks[1].workload_key] == 1
del measure_ctx
if __name__ == "__main__":
test_task_scheduler_round_robin()
test_task_scheduler_round_robin_spawn()
test_task_scheduler_gradient()
"""Test database"""
import copy
import logging
from tvm.autotvm import database
from tvm.autotvm.record import encode, MeasureResult
from tvm.testing.autotvm import get_sample_records
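# DummyDatabase is an in-memory database used here to exercise save/load,
# hash-collision handling, multi-result loading, and filtering.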
def test_save_load():
logging.info("test basic db load/save ...")
records = get_sample_records(3)
inp1, res1 = records[0]
inp2, res2 = records[1]
inp3, _ = records[2]
_db = database.DummyDatabase()
_db.flush()
_db.save(inp1, res1)
_db.save(inp2, res2)
load1 = _db.load(inp1)
load2 = _db.load(inp2)
load3 = _db.load(inp3)
assert load1 == res1
assert load2 == res2
assert load3 is None
assert load1 != load2
TRIAL_LIMIT = 2
def test_db_hash():
logging.info("test db hash check ...")
inp1, res1 = get_sample_records(1)[0]
inp2 = copy.deepcopy(inp1)
inp1.config.code_hash = "cafecafe"
inp2.config.code_hash = "dbffdbff"
res2l = list(tuple(res1))
res2l[-1] = -1
res2 = MeasureResult(*res2l)
_db = database.DummyDatabase()
_db.flush()
_db.save(inp1, res1, extend=True)
_db.save(inp2, res2, extend=True)
load1 = _db.load(inp1)
load2 = _db.load(inp2)
assert load1 != load2
assert load1.timestamp != -1
assert load2.timestamp == -1
def test_db_latest_all():
logging.info("test db load w/ multiple results ...")
inp1, res1 = get_sample_records(1)[0]
lis1 = list(tuple(res1))
lis2 = list(tuple(res1))
lis3 = list(tuple(res1))
lis1[-1] = 0.0
lis2[-1] = 1.1
lis3[-1] = 9999.9999
res1 = MeasureResult(*lis1)
res2 = MeasureResult(*lis2)
res3 = MeasureResult(*lis3)
_db = database.DummyDatabase()
_db.flush()
_db.save(inp1, res1, extend=True)
load1 = _db.load(inp1)
assert load1.timestamp == 0.0
_db.save(inp1, res2, extend=True)
load2 = _db.load(inp1)
assert load2.timestamp == 1.1
_db.save(inp1, res3, extend=True)
load3 = _db.load(inp1)
assert load3.timestamp == 9999.9999
load4 = _db.load(inp1, get_all=True)
assert encode(inp1, load4[0]) == encode(inp1, res1)
assert encode(inp1, load4[1]) == encode(inp1, res2)
assert encode(inp1, load4[2]) == encode(inp1, res3)
def test_db_filter():
logging.info("test db filter ...")
records = get_sample_records(5)
_db = database.DummyDatabase()
_db.flush()
for inp, result in records:
_db.save(inp, result)
records = _db.filter(lambda inp, ress: any(r.costs[0] <= 2 for r in ress))
assert len(records) == 2
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
test_save_load()
test_db_hash()
test_db_latest_all()
test_db_filter()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test dispatcher.
The dispatcher can choose which template to use according
to the parameters of workload"""
from tvm import autotvm
@autotvm.template("testing/dispatch_fallback")
def simple_template(a, b):
cfg = autotvm.get_config()
assert cfg.is_fallback
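# With no tuning records available, get_config() returns a fallback configuration,
# which is what the assertion above verifies.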
def test_fallback():
simple_template(2, 3)
if __name__ == "__main__":
test_fallback()
"""Test feature extraction"""
import numpy as np
import tvm
from tvm import te
from tvm.autotvm import feature
def test_iter_feature_gemm():
N = 128
k = te.reduce_axis((0, N), "k")
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
s = te.create_schedule(C.op)
feas = feature.get_itervar_feature(s, [A, B, C], take_log=False)
expected = [
{
"_attr_": [128, 1, 128, 2097152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
"A_0": [128, -1, 16384, 128, 0, 0],
"B_0": [0, -1, 16384, 128, 0, 0],
"C_0": [128, -1, 16384, 128, 0, 0],
"C_1": [128, -1, 16384, 128, 0, 0],
},
{
"_attr_": [128, 2, 16384, 16384, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
"A_0": [0, -1, 128, 128, 0, 0],
"B_0": [1, -1, 16384, 1, 0, 0],
"C_0": [1, -1, 128, 128, 0, 0],
"C_1": [1, -1, 128, 128, 0, 0],
},
{
"_attr_": [128, 3, 2097152, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
"A_0": [1, -1, 128, 1, 0, 0],
"B_0": [128, -1, 128, 1, 0, 0],
"C_1": [0, -1, 1, 128, 0, 0],
"C_2": [0, -1, 1, 128, 0, 0],
},
]
for ans, row in zip(expected, feas):
for pair in row:
if pair[0] not in ans:
continue
assert ans[pair[0]] == pair[1:], "%s: %s vs %s" % (pair[0], ans[pair[0]], pair[1:])
def test_curve_feature_gemm():
N = 128
k = te.reduce_axis((0, N), "k")
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
s = te.create_schedule(C.op)
feas = feature.get_buffer_curve_sample_flatten(s, [A, B, C], sample_n=30)
assert len(feas) == 30 * 3 * 4 * 2
def test_feature_shape():
"""test the dimensions of flatten feature are the same"""
N = 1024
n_sample = 100
def get_gemm_feature(target):
k = te.reduce_axis((0, N), "k")
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
s = te.create_schedule(C.op)
y, x = s[C].op.axis
axes = list(s[C].tile(y, x, 8, 8)) + [k]
perm = np.random.permutation(5)
axes = [axes[x] for x in perm]
s[C].reorder(*axes)
if "gpu" in target.keys:
pick = []
for i in range(len(perm)):
if perm[i] != 4:
pick.append(axes[i])
s[C].bind(pick[0], te.thread_axis("blockIdx.x"))
s[C].bind(pick[1], te.thread_axis("vthread"))
s[C].bind(pick[2], te.thread_axis("threadIdx.y"))
with target:
feas = feature.get_itervar_feature(s, [A, B, C])
feas = feature.flatten_itervar_feature(feas)
return feas
targets = [
tvm.target.cuda(),
tvm.target.mali(),
tvm.target.arm_cpu(),
]
for target in targets:
dim = len(get_gemm_feature(target))
for i in range(n_sample):
assert dim == len(get_gemm_feature(target)), (
"dimensions of feature do not match" " for different configurations"
)
if __name__ == "__main__":
test_iter_feature_gemm()
test_curve_feature_gemm()
test_feature_shape()
"""Test flop calculation"""
import tvm
from tvm import te
import numpy as np
from tvm.autotvm.task.task import compute_flop
def random_dtypes():
"""Return pair of (input, accumulator) dtypes"""
candidates = [("float32", "float32"), ("float16", "float32"), ("int8", "int32")]
return candidates[np.random.choice(len(candidates))]
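# compute_flop counts a multiply-accumulate as 2 FLOPs, hence the factor of 2 in
# the expected counts for conv, packed gemm, and average pooling below.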
def test_conv():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, acc_dtype) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
K = te.placeholder((CO, CI, KH, KW), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
ci = te.reduce_axis((0, CI))
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW),
lambda n, co, h, w: te.sum(
D[n][ci][h][w].astype(acc_dtype) * K[co][ci][h][w].astype(acc_dtype),
axis=[ci, kh, kw],
),
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * CI * KH * KW
def test_pack_gemm():
for i in range(5):
N, L, M = [np.random.randint(10, 128) * 4 for _ in range(3)]
(input_dtype, acc_dtype) = random_dtypes()
A = te.placeholder((N, L), dtype=input_dtype)
B = te.placeholder((M, L), dtype=input_dtype)
k = te.reduce_axis((0, L))
bn = 4
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
A_pack = te.compute((N // bn, L, bn), lambda i, j, k: A[i * bn + k][j])
B_pack = te.compute((M // bn, L, bn), lambda i, j, k: B[i * bn + k][j])
C_pack = te.compute(
(N // bn, M // bn, bn, bn),
lambda i, j, ii, jj: te.sum(
A_pack[i, k, ii].astype(acc_dtype) * B_pack[j, k, jj].astype(acc_dtype), axis=[k]
),
)
C = te.compute(
(N, M), lambda i, j: C_pack[idxd(i, bn)][idxd(j, bn)][idxm(i, bn)][idxm(j, bn)]
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * L * M
def test_outer_dot():
for i in range(5):
N, M = [np.random.randint(10, 128) * 4 for _ in range(2)]
(input_dtype, acc_dtype) = random_dtypes()
A = te.placeholder((N,), dtype=input_dtype)
B = te.placeholder((M,), dtype=input_dtype)
C = te.compute((N, M), lambda i, j: A[i].astype(acc_dtype) * B[j].astype(acc_dtype))
s = te.create_schedule([C.op])
assert compute_flop(s) == N * M
def test_max_pool():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, _) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW), lambda n, co, h, w: tvm.te.max(D[n][co][h + kh][w + kw], axis=[kh, kw])
)
s = te.create_schedule([C.op])
assert compute_flop(s) == N * CO * OH * OW * KH * KW
def test_average_pool():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, acc_dtype) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW),
lambda n, co, h, w: te.sum(
te.div(D[n][co][h + kh][w + kw].astype(acc_dtype), (KW * KH)), axis=[kh, kw]
),
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * KH * KW
def test_move():
"""No float number operation in simple move. So the estimator should raise an error"""
N = 1024
A = te.placeholder((N,))
C = te.compute((N,), lambda i: A[i])
s = te.create_schedule([C.op])
try:
compute_flop(s)
assert False
except RuntimeError:
pass
if __name__ == "__main__":
test_conv()
test_pack_gemm()
test_outer_dot()
test_move()
"""Test genetic algorithm tuner"""
from tvm.testing.autotvm import DummyRunner, get_sample_task
from tvm import autotvm
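# GATuner seeds its population only with valid configuration indices; after tuning,
# every visited index must still lie within the valid portion of the search space.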
def test_ga_tuner():
"""Test GATuner"""
task, _ = get_sample_task()
tuner = autotvm.tuner.GATuner(task, pop_size=32)
valid_indexes = list(
filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length))
)
assert tuner.visited.issubset(valid_indexes)
assert tuner.pop_size == len(tuner.visited) == len(tuner.genes)
assert len(tuner.space) == 64
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner.tune(n_trial=len(tuner.space), measure_option=measure_option)
assert tuner.visited.issubset(valid_indexes)
task, _ = get_sample_task()
tuner = autotvm.tuner.GATuner(task, pop_size=100)
valid_indexes = list(
filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length))
)
assert tuner.visited.issubset(valid_indexes)
assert tuner.pop_size == len(tuner.visited) == len(tuner.genes)
assert len(tuner.space) == 64
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner.tune(n_trial=len(tuner.space), measure_option=measure_option)
assert tuner.visited.issubset(valid_indexes)
task, _ = get_sample_task()
task.config_space.multi_filter(
filter=lambda entity: 8 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
tuner = autotvm.tuner.GATuner(task, pop_size=32)
valid_indexes = list(
filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length))
)
assert tuner.visited.issubset(valid_indexes)
assert tuner.pop_size == len(tuner.visited) == len(tuner.genes)
assert len(tuner.space) == 43
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner.tune(n_trial=len(tuner.space), measure_option=measure_option)
assert tuner.visited.issubset(valid_indexes)
task, _ = get_sample_task()
task.config_space.multi_filter(
filter=lambda entity: 8 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
tuner = autotvm.tuner.GATuner(task, pop_size=100)
valid_indexes = list(
filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length))
)
assert tuner.visited.issubset(valid_indexes)
assert tuner.pop_size == len(tuner.visited) == len(tuner.genes)
assert len(tuner.space) == 43
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner.tune(n_trial=len(tuner.space), measure_option=measure_option)
assert tuner.visited.issubset(valid_indexes)
if __name__ == "__main__":
test_ga_tuner()
import os
import copy
import numpy as np
import tvm
from tvm import te
import tvm.relay.testing
from tvm import autotvm
from tvm import relay
from tvm.autotvm.task import ConfigEntity
from tvm.autotvm.measure import MeasureResult, MeasureInput
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
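# _create_data builds a small three-conv2d Relay network together with hand-written
# tuning records and layout-transform records for the graph tuner tests to consume.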
def _create_args(dshape, kshape, strides, padding, dilation, layout, out_layout, dtype, out_dtype):
data = tvm.te.placeholder(dshape, dtype=dtype)
kernel = tvm.te.placeholder(kshape, dtype=dtype)
return autotvm.task.serialize_args(
[data, kernel, strides, padding, dilation, layout, layout, out_dtype]
)
def _create_data(target, dshape, dtype, layout):
data = relay.var("data", shape=dshape, dtype=dtype)
w0 = relay.var("w0_weight")
conv0 = relay.nn.conv2d(data, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
w1 = relay.var("w1_weight")
conv1 = relay.nn.conv2d(conv0, w1, channels=32, kernel_size=(1, 1))
w2 = relay.var("w2_weight")
conv2 = relay.nn.conv2d(conv1, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
out = relay.add(conv1, conv2)
net = relay.Function(relay.analysis.free_vars(out), out)
mod, params = relay.testing.create_workload(net)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
)
new_args = [
_create_args(
(1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
),
_create_args(
(1, 16, 8, 8),
(32, 16, 1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 1),
layout,
layout,
dtype,
dtype,
),
_create_args(
(1, 32, 8, 8),
(32, 32, 3, 3),
(1, 1),
(1, 1, 1, 1),
(1, 1),
layout,
layout,
dtype,
dtype,
),
]
costs = [0.04, 0.012, 0.03]
config_list = []
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [3, 1]],
["tile_oc", "sp", [4, 4]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [2, 8]],
["tile_oc", "sp", [1, 32]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [8, 4]],
["tile_oc", "sp", [4, 8]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
records = []
for args, cost, config, task in zip(new_args, costs, config_list, tasks):
task.args = args
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
ltf_records = []
ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
ms_input = MeasureInput(target=target, task=ltf_task, config=None)
ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
ltf_records.append((ms_input, ms_output))
ltf_keys = []
ltf_arg = [te.placeholder((1, 4, 8, 8, 4), dtype=dtype), "NCHW4c", "NCHW8c"]
ltf_wkl = autotvm.task.args_to_workload(ltf_arg, "layout_transform")
ltf_keys.append(ltf_wkl)
ltf_arg = [te.placeholder((1, 1, 8, 8, 32), dtype=dtype), "NCHW32c", "NCHW4c"]
ltf_wkl = autotvm.task.args_to_workload(ltf_arg, "layout_transform")
ltf_keys.append(ltf_wkl)
ltf_arg = [te.placeholder((1, 4, 8, 8, 8), dtype=dtype), "NCHW8c", "NCHW32c"]
ltf_wkl = autotvm.task.args_to_workload(ltf_arg, "layout_transform")
ltf_keys.append(ltf_wkl)
return net, records, ltf_records, ltf_keys, tasks
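# Minimal usage sketch (hypothetical shapes; assumes an "llvm" target is usable
# locally). The helper's outputs wire into a graph tuner exactly as the tests
# below do:
#
#   net, records, ltf_records, ltf_keys, tasks = _create_data("llvm", (1, 3, 8, 8), "float32", "NCHW")
#   tuner = DPTuner(net, {"data": (1, 3, 8, 8)}, records, [relay.op.get("nn.conv2d")], target="llvm")
#   tuner.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
#   tuner.run()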
def test_graph_tuner_layout_transform():
log_file = "%s/test_tuner.log" % (os.getcwd())
target = "llvm"
dshape = (1, 3, 8, 8)
dtype = "float32"
layout = "NCHW"
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout)
executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
out = executor._layout_transform_perf_records
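# With infer_layout=True the tuner extrapolates unmeasured transform times
# linearly: the average measured time per element (from ltf_records) multiplied
# by each workload's element count. The loop below recomputes that expectation.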
num_flops = 0
total_time = 0
for record in ltf_records:
ltf_wkl = record[0].task.workload
input_shape = ltf_wkl[1][1]
flops = np.prod(input_shape)
num_flops += flops
total_time += record[1].costs[0]
avg_time = total_time / num_flops
for ltf_workload in out:
input_shape = ltf_workload[1][1]
flops = np.prod(input_shape)
expected_time = flops * avg_time
out_time = out[ltf_workload][1].costs[0]
assert (
expected_time == out_time
), "Inferred layout transformation time mismatch for %s: " "expecting %f but got %f" % (
str(ltf_workload),
expected_time,
out_time,
)
def test_graph_tuner_layout_transform_runner():
log_file = "%s/test_tuner.log" % (os.getcwd())
target = "llvm"
dshape = (1, 3, 8, 8)
dtype = "float32"
layout = "NCHW"
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout)
executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file)
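# Same linear-extrapolation check as above, but benchmark_layout_transform is
# driven through an explicitly constructed LocalRunner.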
runner = autotvm.LocalRunner(number=100, repeat=1, timeout=10)
executor.benchmark_layout_transform(
layout_records=ltf_records, infer_layout=True, runner=runner
)
out = executor._layout_transform_perf_records
num_flops = 0
total_time = 0
for record in ltf_records:
ltf_wkl = record[0].task.workload
input_shape = ltf_wkl[1][1]
flops = np.prod(input_shape)
num_flops += flops
total_time += record[1].costs[0]
avg_time = total_time / num_flops
for ltf_workload in out:
input_shape = ltf_workload[1][1]
flops = np.prod(input_shape)
expected_time = flops * avg_time
out_time = out[ltf_workload][1].costs[0]
assert (
expected_time == out_time
), "Inferred layout transformation time mismatch for %s: " "expecting %f but got %f" % (
str(ltf_workload),
expected_time,
out_time,
)
def test_DPTuner_run():
log_file = "%s/test_tuner.log" % (os.getcwd())
target = "llvm"
dtype = "float32"
layout = "NCHW"
dshape = (1, 3, 8, 8)
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
mod = tvm.IRModule()
mod["main"] = g
costs = [0.02, 0.02, 0.045]
config_list = []
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 3]],
["tile_oc", "sp", [2, 8]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [4, 4]],
["tile_oc", "sp", [2, 16]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [16, 2]],
["tile_oc", "sp", [8, 4]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
for cost, config, task in zip(costs, config_list, tasks):
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
executor = DPTuner(mod, {"data": dshape}, records, target_ops, target, log_file=log_file)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
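# records[0:3] come from _create_data; records[3:6] were appended above. The
# optimal selection is expected to mix the two batches.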
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
assert os.path.isfile(log_file), "No log file with name %s exists." % log_file
def test_PBQPTuner_run():
target = "llvm"
dtype = "float32"
layout = "NCHW"
dshape = (1, 3, 8, 8)
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
costs = [0.02, 0.02, 0.045]
config_list = []
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 3]],
["tile_oc", "sp", [2, 8]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [4, 4]],
["tile_oc", "sp", [2, 16]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [16, 2]],
["tile_oc", "sp", [8, 4]],
["tile_ow", "sp", [2, 4]],
[" |
unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
for cost, config, task in zip(costs, config_list, tasks):
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
executor = PBQPTuner(g, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
def test_many_sub_graphs():
target = "llvm"
dtype = "float32"
dshape = (1, 8, 8, 3)
layout = "NCHW"
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
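# Build a network where each conv2d is wrapped in layout transposes, splitting
# the graph into several conv2d subgraphs the tuner must optimize jointly.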
data = relay.var("data", shape=dshape, dtype=dtype)
t0 = relay.transpose(data, (0, 3, 1, 2))
w0 = relay.var("w0_weight")
conv0 = relay.nn.conv2d(t0, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
t1 = relay.transpose(conv0, (0, 2, 3, 1))
w1 = relay.var("w1_weight")
t2 = relay.transpose(t1, (0, 3, 1, 2))
conv1 = relay.nn.conv2d(t2, w1, channels=32, kernel_size=(1, 1))
t3 = relay.transpose(conv1, (0, 2, 3, 1))
w2 = relay.var("w2_weight")
t4 = relay.transpose(t3, (0, 3, 1, 2))
conv2 = relay.nn.conv2d(t4, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
t5 = relay.transpose(conv2, (0, 2, 3, 1))
out = relay.add(t3, t5)
net = relay.Function(relay.analysis.free_vars(out), out)
net, params = relay.testing.create_workload(net)
tasks = autotvm.task.extract_from_program(
net["main"], target=target, params=params, ops=(conv2d,)
)
new_args = [
_create_args(
            (1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
),
_create_args(
(1, 16, 8, 8),
(32, 16, 1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 1),
layout,
layout,
dtype,
dtype,
),
_create_args(
(1, 32, 8, 8),
(32, 32, 3, 3),
(1, 1),
(1, 1, 1, 1),
(1, 1),
layout,
layout,
dtype,
dtype,
),
]
costs = [0.04, 0.012, 0.03, 0.02, 0.02, 0.045]
config_list = []
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [3, 1]],
["tile_oc", "sp", [4, 4]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [2, 8]],
["tile_oc", "sp", [1, 32]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [8, 4]],
["tile_oc", "sp", [4, 8]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 3]],
["tile_oc", "sp", [2, 8]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [4, 4]],
["tile_oc", "sp", [2, 16]], |