# Imports reconstructed from the names used below; the module paths for get_network,
# load_quantized_bert_base and _normalize_mod are assumptions.
import tempfile
from typing import List

import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import IRModule, relay, te, tir
from tvm import meta_schedule as ms
from tvm._ffi import register_func
from tvm.contrib import graph_executor
from tvm.ir.transform import PassContext
from tvm.meta_schedule.database import TuningRecord, Workload
from tvm.meta_schedule.relay_integration import _normalize_mod
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tlcbench import load_quantized_bert_base
from tvm.script import tir as T
from tvm.target import Target
@tvm.script.ir_module
class MockModule:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (16,), "float32")
B = T.match_buffer(b, (16,), "float32")
for i in T.serial(0, 16):
with T.block("matmul"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def _has_torch():
import importlib.util
spec = importlib.util.find_spec("torch")
return spec is not None
requires_torch = pytest.mark.skipif(not _has_torch(), reason="torch is not installed")
def test_meta_schedule_dynamic_loop_extent():
a = relay.var("a", shape=(1, 8, 8, 512), dtype="float32")
b = relay.nn.adaptive_avg_pool2d(a, (7, 7), "NHWC")
mod = IRModule({"main": relay.Function([a], b)})
extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params={})
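# The 8x8 -> 7x7 adaptive_avg_pool2d yields non-uniform (dynamic) loop extents,
# which task extraction is expected to skip entirely, hence the empty result below.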
assert not extracted_tasks
@requires_torch
def test_meta_schedule_integration_extract_from_resnet():
mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params=params)
expected_task_names = [
"fused_" + s
for s in [
"nn_max_pool2d",
"nn_adaptive_avg_pool2d",
"nn_dense_add",
"nn_conv2d_add",
"nn_conv2d_add_1",
"nn_conv2d_add_2",
"nn_conv2d_add_add_nn_relu",
"nn_conv2d_add_add_nn_relu_1",
"nn_conv2d_add_nn_relu",
"nn_conv2d_add_nn_relu_1",
"nn_conv2d_add_nn_relu_2",
"nn_conv2d_add_nn_relu_3",
"nn_conv2d_add_nn_relu_4",
"nn_conv2d_add_nn_relu_5",
"nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu",
"nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1",
"nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
"nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
"layout_transform",
"layout_transform_reshape_squeeze",
]
]
assert len(extracted_tasks) == len(expected_task_names)
for t in extracted_tasks:
assert t.task_name in expected_task_names, t.task_name
@requires_torch
def test_task_extraction_anchor_block():
mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
extracted_tasks = ms.relay_integration.extract_tasks(
mod, target="llvm", params=params, module_equality="anchor-block"
)
expected_task_names = [
"fused_" + s
for s in [
"nn_max_pool2d",
"nn_adaptive_avg_pool2d",
"nn_dense_add",
"nn_conv2d_add",
"nn_conv2d_add_1",
"nn_conv2d_add_2",
"nn_conv2d_add_nn_relu",
"nn_conv2d_add_nn_relu_1",
"nn_conv2d_add_nn_relu_2",
"nn_conv2d_add_nn_relu_3",
"nn_conv2d_add_nn_relu_4",
"nn_conv2d_add_nn_relu_5",
"nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
"nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
"layout_transform",
"layout_transform_reshape_squeeze",
]
]
assert len(extracted_tasks) == len(expected_task_names)
for t in extracted_tasks:
assert t.task_name in expected_task_names, t.task_name
@requires_torch
def test_meta_schedule_integration_extract_from_bert_base():
pytest.importorskip(
"transformers", reason="transformers package is required to |
import bert_base"
)
expected = {
"fused_nn_dense_2": (
12,
[[64, 3072], [768, 3072], [64, 768]],
),
"fused_nn_dense": (
48,
[[64, 768], [768, 768], [64, 768]],
),
"fused_nn_dense_1": (
12,
[[64, 768], [3072, 768], [64, 3072]],
),
"fused_subtract_add_rsqrt_multiply_multiply_add": (
25,
[[1, 64, 768], [1, 64, 1], [1, 64, 1], [768], [768], [1, 64, 768]],
),
"fused_nn_batch_matmul": (
24,
[[12, 64, 64], [12, 64, 64], [12, 64, 64]],
),
"fused_reshape_add_add": (
24,
[[64, 768], [768], [1, 64, 768], [1, 64, 768]],
),
"fused_variance": (
25,
[[1, 64, 768], [1, 64, 1], [1, 64, 1]],
),
"fused_mean": (
25,
[[1, 64, 768], [1, 64, 1]],
),
"fused_reshape_add_reshape_transpose_reshape": (
12,
[[64, 768], [768], [12, 64, 64]],
),
"fused_reshape_add_multiply_fast_erf_multiply_add_multiply_reshape": (
12,
[[64, 3072], [3072], [64, 3072]],
),
"fused_nn_fast_softmax": (
12,
[[1, 12, 64, 64], [1, 12, 64, 64]],
),
"fused_reshape_add_reshape_transpose_reshape_1": (
24,
[[64, 768], [768], [12, 64, 64]],
),
"fused_reshape_divide_add": (
12,
[[12, 64, 64], [1, 1, 1, 64], [1, 12, 64, 64]],
),
"fused_reshape_transpose_reshape": (
12,
[[12, 64, 64], [64, 768]],
),
"fused_nn_dense_add_fast_tanh": (
1,
[[1, 768], [768, 768], [1, 768], [1, 768]],
),
"fused_cast_take_add": (
1,
[[1, 64], [30522, 768], [1, 64, 768], [1, 64, 768]],
),
"fused_take": (
1,
[[1, 64, 768], [1, 768]],
),
"fused_reshape": (
12,
[[1, 12, 64, 64], [12, 64, 64]],
),
"fused_reshape_1": (
24,
[[1, 64, 768], [64, 768]],
),
}
mod, params, _ = get_network(name="bert_base", input_shape=[1, 64])
extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params=params)
assert len(extracted_tasks) == len(expected)
for t in extracted_tasks:
prim_func = None
for _, v in t.dispatched[0].functions.items():
prim_func = v
shape = [[int(x) for x in prim_func.buffer_map[b].shape] for b in prim_func.params]
assert t.task_name in expected
expected_weight, expected_shape = expected[t.task_name]
assert expected_weight == t.weight, t.task_name
assert expected_shape == shape, t.task_name
@requires_torch
def test_meta_schedule_integration_extract_from_resnet_with_filter_func():
@register_func("relay.backend.tir_converter.remove_purely_spatial", override=True)
def filter_func(args, _) -> bool:
from tvm.te import create_prim_func
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, te.PlaceholderOp):
pass
elif isinstance(t.op, te.ComputeOp):
has_complex_op = has_complex_op or any(isinstance(e, tir.Reduce) for e in t.op.body)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in args:
traverse(t)
if not has_complex_op:
return None
return create_prim_func(args)
mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
extracted_tasks = ms.relay_integration.extract_tasks(
mod,
target="llvm",
params=params,
pass_config={
"relay.backend.use_meta_schedule": True,
"relay.backend.tir_converter": "remove_purely_spatial",
},
)
expected_task_names = [
"fused_" + s
for s in [
"nn_max_pool2d",
"nn_adaptive_avg_pool2d",
"nn_dense_add",
"nn_conv2d_add",
"nn_conv2d_add_1",
"nn_conv2d_add_2",
"nn_conv2d_add_add_nn_relu",
"nn_conv2d_add_add_nn_relu_1",
"nn_conv2d_add_nn_relu",
"nn_conv2d_add_nn_relu_1",
"nn_conv2d_add_nn_relu_2",
"nn_conv2d_add_nn_relu_3",
"nn_conv2d_add_nn_relu_4",
"nn_conv2d_add_nn_relu_5",
"nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu",
"nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1",
"nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
"nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
]
]
assert len(extracted_tasks) == len(expected_task_names)
for t in extracted_tasks:
assert t.task_name in expected_task_names, t.task_name
@pytest.mark.skip("Too slow on CI")
def extract_task_qbert():
def _test(mod, params, target):
extracted_tasks = ms.relay_integration.extract_tasks(mod, target, params)
tune_tasks = list(
filter(
lambda task: "dense" in task.task_name or "batch_matmul" in task.task_name,
extracted_tasks,
)
)
assert len(tune_tasks) == 6
for task in tune_tasks:
relay_func = list(task.mod.functions.values())[0]
out_type = relay_func.body.checked_type
if out_type.dtype == "float32":
continue
sch = tvm.tir.Schedule(_normalize_mod(task.dispatched[0]))
block = sch.get_block("compute")
annotations = sch.get(block).annotations
assert "schedule_rule" in annotations
assert "vnni" in annotations["schedule_rule"]
mod, params, _ = load_quantized_bert_base(batch_size=1, seq_len=128)
_test(mod, params, target="llvm -mcpu=cascadelake")
@tvm.testing.skip_if_32bit(reason="Apparently the LLVM version on i386 image is too old")
def test_extract_task_arm_conv2d_nchwc():
data_shape = (1, 64, 128, 128)
weight_shape = (32, 64, 1, 1)
bias_shape = (weight_shape[0],)
padding = (1, 1)
data = relay.var("data", shape=data_shape, dtype="int8")
weight = relay.var("weight", shape=weight_shape, dtype="int8")
bias = relay.var("bias", shape=bias_shape, dtype="int32")
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=weight_shape[2:],
channels=weight_shape[0],
padding=padding,
strides=(1, 1),
out_dtype="int32",
)
bias_add = relay.nn.bias_add(conv2d, bias)
relay_mod = tvm.IRModule.from_expr(bias_add)
weight_np = np.random.uniform(1, 10, size=weight_shape).astype("int8")
bias_np = np.random.uniform(1, 10, size=bias_shape).astype("int32")
params = {"weight": weight_np, "bias": bias_np}
target = "llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon"
extracted_tasks = ms.relay_integration.extract_tasks(relay_mod, target, params)
tune_tasks = list(
filter(
lambda task: "conv2d" in task.task_name,
extracted_tasks,
)
)
assert len(tune_tasks) == 1
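# The single conv2d task should have been rewritten to the NCHWc layout:
# 32 output channels packed as 8 chunks x 4, spatial dims padded 128 -> 130.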
relay_func = list(tune_tasks[0].mod.functions.values())[0]
out_type = relay_func.body.checked_type
assert list(out_type.shape) == [1, 8, 130, 130, 4]
def test_meta_schedule_te2primfunc_argument_order_and_lowering():
@tvm.script.ir_module
class _fused_layout_transform:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 3, 16, 16), "float32"],
T_layout_trans: T.Buffer[(1, 1, 16, 16, 3), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4 in T.grid(1, 1, 16, 16, 3):
with T.block("T_layout_trans"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder[ax0, ax1 * 3 + ax4, ax2, ax3])
T.writes(T_layout_trans[ax0, ax1, ax2, ax3, ax4])
T_layout_trans[ax0, ax1, ax2, ax3, ax4] = T.if_then_else(
ax0 < 1 and ax1 * 3 + ax4 < 3 and ax2 < 16 and ax3 < 16,
placeholder[ax0, ax1 * 3 + ax4, ax2, ax3],
T.float32(0),
dtype="float32",
)
@tvm.script.ir_module
class _fused_layout_transform_1:
@T.prim_func
def main(placeholder: T.Buffer[(1, 2, 16, 16, 4), "float32"], T_layout_trans: T.Buffer[(1, 8, 16, 16), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3 in T.grid(1, 8, 16, 16):
with T.block("T_layout_trans"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(placeholder[ax0, ax1 // 4, ax2, ax3, ax1 % 4])
T.writes(T_layout_trans[ax0, ax1, ax2, ax3])
T_layout_trans[ax0, ax1, ax2, ax3] = T.if_then_else(ax0 < 1 and ax1 < 8 and ax2 < 16 and ax3 < 16, placeholder[ax0, ax1 // 4, ax2, ax3, ax1 % 4], T.float32(0), dtype="float32")
@tvm.script.ir_module
class _fused_nn_contrib_conv2d_NCHWc:
@T.prim_func
def main(placeholder: T.Buffer[(1, 1, 16, 16, 3), "float32"], placeholder_1: T.Buffer[(2, 1, 5, 5, 3, 4), "float32"], conv2d_NCHWc: T.Buffer[(1, 2, 16, 16, 4), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
data_pad = T.alloc_buffer([1, 1, 20, 20, 3], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 1, 20, 20, 3):
with T.block("data_pad"):
i0_1, i1_1, i2_1, i3_1, i4_1 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder[i0_1, i1_1, i2_1 - 2, i3_1 - 2, i4_1])
T.writes(data_pad[i0_1, i1_1, i2_1, i3_1, i4_1])
data_pad[i0_1, i1_1, i2_1, i3_1, i4_1] = T.if_then_else(2 <= i2_1 and i2_1 < 18 and 2 <= i3_1 and i3_1 < 18, placeholder[i0_1, i1_1, i2_1 - 2, i3_1 - 2, i4_1], T.float32(0), dtype="float32")
for i0, i1, i2, i3, i4, i5, i6, i7 in T.grid(1, 2, 16, 16, 4, 3, 5, 5):
with T.block("conv2d_NCHWc"):
n, oc_chunk, oh, ow, oc_block, ic, kh, kw = T.axis.remap("SSSSSRRR", [i0, i1, i2, i3, i4, i5, i6, i7])
T.reads(data_pad[n, ic // 3, oh + kh, ow + kw, ic % 3], placeholder_1[oc_chunk, ic // 3, kh, kw, ic % 3, oc_block])
T.writes(conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] = T.float32(0)
conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] + data_pad[n, ic // 3, oh + kh, ow + kw, ic % 3] * placeholder_1[oc_chunk, ic // 3, kh, kw, ic % 3, oc_block]
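# A hand-rolled PyDatabase pre-populated with only the three TIR modules above:
# has_workload raises if relay.build looks up any other workload, which would mean
# an unexpected PrimFunc (e.g. wrong argument order) was generated from the TE subgraph.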
def _create_verification_database():
@ms.derived_object
class VerificationDatabase(ms.database.PyDatabase):
def __init__(self):
super().__init__()
self.tuning_records_: List[TuningRecord] = []
self.workloads_: List[Workload] = []
def has_workload(self, mod: IRModule) -> bool:
for workload in self.workloads_:
if tvm.ir.structural_equal(mod, workload.mod):
return True
raise ValueError(
"The workload searched for is not in given database!"
+ " Incorrect TIR was generated from TE subgraph."
)
def commit_workload(self, mod: IRModule) -> ms.database.Workload:
workload = ms.database.Workload(mod)
self.workloads_.append(workload)
return workload
def commit_tuning_record(self, record: TuningRecord) -> None:
self.tuning_records_.append(record)
def get_all_tuning_records(self) -> List[TuningRecord]:
return self.tuning_records_
def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]:
return sorted(
list(
filter(
lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod),
self.tuning_records_,
)
),
key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9,
)[:top_k]
def __len__(self) -> int:
return len(self.tuning_records_)
database = VerificationDatabase()
def _commit(mod):
workload = database.commit_workload(mod)
database.commit_tuning_record(
ms.database.TuningRecord(
tir.schedule.Trace([], {}),
workload=workload,
run_secs=[0.1],
)
)
_commit(_fused_layout_transform)
_commit(_fused_layout_transform_1)
_commit(_fused_nn_contrib_conv2d_NCHWc)
return database
data_shape = (1, 3, 16, 16)
weight_shape = (8, 3, 5, 5)
def _create_relay_mod():
data = relay.var("data", relay.TensorType(data_shape, "float32"))
weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
kernel_layout="OIHW",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
return mod
mod = _create_relay_mod()
dev = tvm.cpu()
target = Target("llvm --num-cores=16")
params = {
"weight": np.random.rand(*weight_shape).astype("float32"),
}
data = tvm.nd.array(
np.random.rand(*data_shape).astype("float32"),
dev,
)
with target, _create_verification_database(), PassContext(
opt_level=3,
config={
"relay.backend.use_meta_schedule": True,
"relay.backend.use_meta_schedule_dispatch": 7,
"relay.backend.tir_converter": "default",
},
):
rt_mod1 = relay.build(mod, target=target, params=params)
with tvm.transform.PassContext(opt_level=0):
rt_mod2 = relay.build(mod, target=target, params=params)
def get_output(data, lib):
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
return module.get_output(0).numpy()
actual_output = get_output(data, rt_mod1)
expected_output = get_output(data, rt_mod2)
assert np.allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
def test_rewrite_layout_link_params():
I, O, H, W = 64, 64, 56, 56
kH = kW = 3
strides = (1, 1)
padding = (1, 1)
data_shape = (1, H, W, I)
w_shape = (kH, kW, I, O)
bias_shape = (1, 1, 1, O)
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight1", shape=w_shape, dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
conv = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=(kH, kW),
channels=O,
padding=padding,
strides=strides,
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
mod = tvm.IRModule.from_expr(conv + bias)
weight_np = np.random.randn(*w_shape).astype("float32")
bias_np = np.random.randn(*bias_shape).astype("float32")
params = {"weight1": weight_np, "bias": bias_np}
data_np = np.random.randn(*data_shape).astype("float32")
ref = (
relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
.evaluate()(*[data_np, weight_np, bias_np])
.numpy()
)
link_params = True
target = "llvm --num-cores=4"
executor = relay.backend.Executor("graph", {"link-params": link_params})
mod = mod.with_attr("executor", executor)
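# With link-params the weights are embedded into the compiled module, so a layout
# rewrite chosen during tuning must also be applied to the stored constants; both
# search strategies are checked against the reference graph-executor output.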
for strategy in ["replay-trace", "evolutionary"]:
with tempfile.TemporaryDirectory() as work_dir:
database = ms.relay_integration.tune_relay(
mod=mod,
target=target,
params=params,
work_dir=work_dir,
max_trials_global=4,
strategy=strategy,
)
lib = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=target,
params=params,
)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4)
def test_module_equality_ignore_ndarray():
target = "llvm --num-cores=4"
data_shape = (128, 128)
weight_shape1 = (128, 128)
weight_shape2 = (128, 128)
data = relay.var("data", shape=data_shape, dtype="float32")
weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32")
weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32")
dense1 = relay.nn.dense(data, weight1)
dense2 = relay.nn.dense(dense1, weight2)
mod = tvm.IRModule.from_expr(dense2)
weight1_np = np.random.randn(*weight_shape1).astype("float32")
weight2_np = np.random.randn(*weight_shape2).astype("float32")
params = {"weight1": weight1_np, "weight2": weight2_np}
executor = relay.backend.Executor("graph", {"link-params": True})
mod = mod.with_attr("executor", executor)
assert len(ms.relay_integration.extract_tasks(mod, target, params)) == 2
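# Structural equality treats the two dense subgraphs as distinct tasks because their
# linked constant weights differ; "ignore-ndarray" ignores constant contents when
# hashing modules, so the same two subgraphs deduplicate into a single task.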
module_equality = "ignore-ndarray"
extracted_tasks = ms.relay_integration.extract_tasks(
mod, target, params, module_equality=module_equality
)
assert len(extracted_tasks) == 1
with tempfile.TemporaryDirectory() as work_dir:
tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
extracted_tasks, work_dir, strategy="replay-trace"
)
database = ms.tune.tune_tasks(
tasks=tasks,
task_weights=task_weights,
work_dir=work_dir,
max_trials_global=4,
module_equality=module_equality,
)
lib = ms.relay_integration.compile_relay(database, mod, target, params)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
data_np = np.random.randn(*data_shape).astype("float32")
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = np.dot(np.dot(data_np, weight1_np.transpose()), weight2_np.transpose())
np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4)
def _test_anchor_tuning(target):
data_shape = (128, 128)
weight_shape1 = (128, 128)
weight_shape2 = (128, 128)
data = relay.var("data", shape=data_shape, dtype="float32")
weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32")
weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32")
dense1 = relay.nn.dense(data, weight1)
dense2 = relay.nn.dense(dense1 + relay.const(1.0, dtype="float32"), weight2)
mod = tvm.IRModule.from_expr(dense2 - data + relay.const(1.0, dtype="float32"))
weight1_np = np.random.randn(*weight_shape1).astype("float32")
weight2_np = np.random.randn(*weight_shape2).astype("float32")
data_np = np.random.randn(*data_shape).astype("float32")
params = {"weight1": weight1_np, "weight2": weight2_np}
module_equality = "anchor-block"
extracted_tasks = ms.relay_integration.extract_tasks(
mod, target, params, module_equality=module_equality
)
assert len(extracted_tasks) == 1
with tempfile.TemporaryDirectory() as work_dir:
database = ms.relay_integration.tune_relay(
mod=mod,
target=target,
params=params,
work_dir=work_dir,
max_trials_global=4,
strategy="replay-trace",
module_equality=module_equality,
)
lib = ms.relay_integration.compile_relay(database, mod, target, params)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = (
relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
.evaluate()(*[data_np, weight1_np, weight2_np])
.numpy()
)
np.testing.assert_allclose(ref, out, atol=1e-3)
def test_anchor_tuning_cpu():
_test_anchor_tuning("llvm --num-cores=4")
def test_anchor_tuning_cpu_link_params():
data_shape = (128, 128)
weight_shape1 = (128, 128)
weight_shape2 = (128, 128)
data = relay.var("data", shape=data_shape, dtype="float32")
weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32")
weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32")
dense1 = relay.nn.dense(data, weight1)
dense2 = relay.nn.dense(dense1, weight2)
mod = tvm.IRModule.from_expr(dense2 + relay.const(1.0, dtype="float32"))
weight1_np = np.random.randn(*weight_shape1).astype("float32")
weight2_np = np.random.randn(*weight_shape2).astype("float32")
data_np = np.random.randn(*data_shape).astype("float32")
params = {"weight1": weight1_np, "weight2": weight2_np}
module_equality = "anchor-block"
target = "llvm --num-cores=4"
executor = relay.backend.Executor("graph", {"link-params": True})
mod = mod.with_attr("executor", executor)
with tempfile.TemporaryDirectory() as work_dir:
database = ms.relay_integration.tune_relay(
mod=mod,
target=target,
params=params,
work_dir=work_dir,
max_trials_global=4,
strategy="replay-trace",
module_equality=module_equality,
)
lib = ms.relay_integration.compile_relay(database, mod, target, params)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = (
relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
.evaluate()(*[data_np, weight1_np, weight2_np])
.numpy()
)
np.testing.assert_allclose(ref, out, atol=1e-3)
if __name__ == "__main__":
tvm.testing.main()


import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm
from tvm import meta_schedule as ms
from tvm import relay, te
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.script import tir as T
def compute_tir_conv2d_nchw_oihw(data_shape, weight_shape, dtype):
assert dtype == "float32"
OC, IC, FH, FW = weight_shape
padding = (0, 0, 0, 0)
strides = (1, 1)
dilation = (1, 1)
output_shape = (
data_shape[0],
weight_shape[0],
(data_shape[2] - ((weight_shape[2] - 1) * dilation[0] + 1) + padding[0] + padding[1])
+ 1,
(data_shape[3] - ((weight_shape[3] - 1) * dilation[1] + 1) + padding[2] + padding[3])
+ 1,
)
N, K, BH, BW = output_shape
@T.prim_func
def conv2d(a: T.handle, filt: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, data_shape, dtype=dtype)
Filter = T.match_buffer(filt, weight_shape, dtype=dtype)
B = T.match_buffer(b, output_shape, dtype=dtype)
for n, k, bh, bw in T.grid(N, K, BH, BW):
with T.block("init"):
vn, vk, vbh, vbw = T.axis.remap("SSSS", [n, k, bh, bw])
B[vn, vk, vbh, vbw] = T.float32(0)
for ic, fh, fw in T.grid(IC, FH, FW):
with T.block("update"):
vn, vk, vbh, vbw, vc, vfh, vfw = T.axis.remap("SSSSRRR", [n, k, bh, bw, ic, fh, fw])
B[vn, vk, vbh, vbw] = B[vn, vk, vbh, vbw] + A[vn, vc, vbh + vfh, vbw + vfw] * Filter[vk, vc, vfh, vfw]
return conv2d
def schedule_tir_conv2d_nchw_oihw(sch):
update_block = sch.get_block("update")
vn, vk, vbh, vbw, vc, vfh, vfw = sch.get_loops(update_block)
sch.split(vk, factors=(None, 32))
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
prim_func = compute_tir_conv2d_nchw_oihw(input.shape, filter.shape, input.dtype)
output = te.extern_primfunc([input, filter], prim_func, name="tir")
return output
@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
s = te.create_schedule([x.op for x in outs])
return s
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
strategy = relay.op.OpStrategy()
if attrs.groups == 1 and attrs.data_layout == "NCHW" and attrs.kernel_layout == "OIHW":
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
name="conv2d_2",
plevel=15,
)
else:
raise ValueError("No valid strategy found")
return strategy
def get_conv2d(data_shape, weight_shape, **kwargs):
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
conv2d = relay.nn.conv2d(
data,
weight,
**kwargs,
)
return relay.Function([data, weight], conv2d)
def get_ref(data, weight, stride, padding):
return tvm.topi.testing.conv2d_nchw_python(data, weight, stride, padding)
def test_conv2d():
N, IC, H, W = 1, 64, 56, 56
OC, IC, FH, FW = 128, 64, 3, 3
data_shape = (N, IC, H, W)
weight_shape = (OC, IC, FH, FW)
padding = (0, 0)
strides = (1, 1)
relay_mod = tvm.IRModule.from_expr(
get_conv2d(
data_shape,
weight_shape,
padding=padding,
strides=strides,
channels=OC,
kernel_size=(FH, FW),
data_layout="NCHW",
kernel_layout="OIHW",
)
)
data_np = np.random.randn(*data_shape).astype("float32")
weight_np = np.random.randn(*weight_shape).astype("float32")
target = "llvm"
params = {"weight": weight_np}
def schedule_fn(sch):
if "nn_conv2d" in sch.mod.attrs["task_name"]:
schedule_tir_conv2d_nchw_oihw(sch)
return True
return False
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
with ms.database.ScheduleFnDatabase(schedule_fn), tvm.transform.PassContext(
opt_level=3,
config={
"relay.backend.use_meta_schedule": True,
"relay.backend.tir_converter": "allow_extern",
},
):
lib = relay.build(relay_mod, target=target, params=params)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = get_ref(data_np, weight_np, strides, padding)
tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
test_conv2d()


""" Test Meta Schedule Runner """
import itertools
import sys
import time
from typing import Any, List

import numpy as np
import pytest
import tvm
import tvm.testing
from tvm._ffi import register_func
from tvm.meta_schedule.arg_info import TensorInfo
from tvm.meta_schedule.builder import BuilderInput, LocalBuilder
from tvm.meta_schedule.runner import (
EvaluatorConfig,
LocalRunner,
PyRunner,
RPCConfig,
RPCRunner,
RunnerFuture,
RunnerInput,
)
from tvm.meta_schedule.runner.local_runner import (
default_alloc_argument as local_default_alloc_argument,
)
from tvm.meta_schedule.runner.rpc_runner import (
T_ARG_INFO_JSON_OBJ_LIST,
T_ARGUMENT_LIST,
)
from tvm.meta_schedule.runner.rpc_runner import (
default_alloc_argument as rpc_default_alloc_argument,
)
from tvm.meta_schedule.testing.local_rpc import LocalRPC
from tvm.meta_schedule.utils import (
derived_object,
get_global_func_with_default_on_worker,
)
from tvm.rpc import RPCSession
from tvm.runtime import Device, Module
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import FloatImm
MATMUL_N = 16
MATMUL_M = 32
@tvm.script.ir_module
class MatmulModule:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (16, 16), "float32")
B = T.match_buffer(b, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i, j, k in T.grid(16, 16, 16):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class MatmulReluModule:
@T.prim_func
def main(a: T.handle, b: T.handle, d: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (16, 16), "float32")
B = T.match_buffer(b, (16, 16), "float32")
D = T.match_buffer(d, (16, 16), "float32")
C = T.alloc_buffer((16, 16), "float32")
for i, j, k in T.grid(16, 16, 16):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j in T.grid(16, 16):
with T.block("relu"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = T.max(C[vi, vj], 0.0)
@tvm.script.ir_module
class BatchMatmulModule:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, [16, 32, 32])
B = T.match_buffer(b, [16, 32, 32])
C = T.match_buffer(c, [16, 32, 32])
for n, i, j, k in T.grid(16, 32, 32, 32):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
with T.init():
C[vn, vi, vj] = 0.0
C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
@tvm.script.ir_module
class AddModule:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, [32], "float32")
B = T.match_buffer(b, [32], "float32")
C = T.match_buffer(c, [32], "float32")
for i in range(32):
with T.block("add"):
vi = T.axis.S(32, i)
C[vi] = A[vi] + B[vi]
def _clean_build(artifact_path: str) -> None:
f_clean_build = get_global_func_with_default_on_worker("meta_schedule.remove_build_dir", None)
if f_clean_build is not None:
f_clean_build(artifact_path)
else:
raise RuntimeError("Unable to find remove_build_dir function.")
def test_meta_schedule_rpc_single_run():
"""Test meta schedule rpc runner for a single run"""
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(rpc_config, evaluator_config)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_local_single_run():
"""Test meta schedule local runner for a single run"""
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(timeout_sec=100, evaluator_config=evaluator_config)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_rpc_multiple_runs():
"""Test meta schedule rpc runner for multiple runs"""
mods = [
MatmulModule,
MatmulReluModule,
BatchMatmulModule,
]
builder = LocalBuilder()
builder_inputs = [BuilderInput(mod, Target("llvm")) for mod in mods]
builder_results = builder.build(builder_inputs)
for builder_result in builder_results:
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
args_infos = [
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
],
]
runner_inputs = [
RunnerInput(builder_results[i].artifact_path, "llvm", args_infos[i])
for i in range(len(mods))
]
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(rpc_config, evaluator_config)
runner_futures = runner.run(runner_inputs)
runner_results = [runner_future.result() for runner_future in runner_futures]
for runner_result in runner_results:
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
for builder_result in builder_results:
_clean_build(builder_result.artifact_path)
def test_meta_schedule_local_multiple_runs():
"""Test meta schedule local runner for multiple runs"""
mods = [
MatmulModule,
MatmulReluModule,
BatchMatmulModule,
]
builder = LocalBuilder()
builder_inputs = [BuilderInput(mod, Target("llvm")) for mod in mods]
builder_results = builder.build(builder_inputs)
for builder_result in builder_results:
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
args_infos = [
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
],
]
runner_inputs = [
RunnerInput(builder_results[i].artifact_path, "llvm", args_infos[i])
for i in range(len(mods))
]
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(timeout_sec=100, evaluator_config=evaluator_config)
runner_futures = runner.run(runner_inputs)
runner_results = [runner_future.result() for runner_future in runner_futures]
for runner_result in runner_results:
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
for builder_result in builder_results:
_clean_build(builder_result.artifact_path)
def test_meta_schedule_py_runner():
"""Test meta schedule PyRunner"""
@derived_object
class TestRunner(PyRunner):
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
raise ValueError("TestRunner")
runner = TestRunner()
with pytest.raises(ValueError, match="TestRunner"):
runner.run([])
def test_meta_schedule_rpc_runner_time_out():
"""Test meta schedule RPC Runner time out"""
def initializer():
@register_func("meta_schedule.runner.test_time_out")
def timeout_session_creator(
rpc_config: RPCConfig,
) -> RPCSession:
time.sleep(2)
runner_input = RunnerInput(
"test",
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=1,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
initializer=initializer,
f_create_session="meta_schedule.runner.test_time_out",
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"RPCRunner: Timeout, killed after"
)
assert runner_result.run_secs is None
def test_meta_schedule_local_runner_time_out():
"""Test meta schedule Local Runner time out"""
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
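# f_alloc_argument is redirected to a function that sleeps for 2s while timeout_sec
# is 1, forcing the LocalRunner to report a timeout.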
def initializer():
@register_func("meta_schedule.runner.test_time_out")
def timeout_session_creator(
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> RPCSession:
time.sleep(2)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(
timeout_sec=1,
evaluator_config=evaluator_config,
initializer=initializer,
f_alloc_argument="meta_schedule.runner.test_time_out",
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"LocalRunner: Timeout, killed after"
)
assert runner_result.run_secs is None
_clean_build(builder_result.artifact_path)
def test_meta_schedule_rpc_runner_exception():
"""Test meta schedule RPC Runner exception"""
def initializer():
@register_func("meta_schedule.runner.test_exception")
def exception_session_creator(
rpc_config: RPCConfig,
) -> RPCSession:
raise Exception("Test")
runner_input = RunnerInput(
"test",
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
initializer=initializer,
f_create_session="meta_schedule.runner.test_exception",
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"RPCRunner: An exception occurred\n"
)
assert runner_result.run_secs is None
def test_meta_schedule_local_runner_exception():
"""Test meta schedule Local Runner exception"""
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
def initializer():
@register_func("meta_schedule.runner.test_exception")
def timeout_session_creator(
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> RPCSession:
raise Exception("Test")
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(
evaluator_config=evaluator_config,
initializer=initializer,
f_alloc_argument="meta_schedule.runner.test_exception",
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"LocalRunner: An exception occurred\n"
)
assert runner_result.run_secs is None
_clean_build(builder_result.artifact_path)
def test_meta_schedule_runner_matmul_test():
"""Test meta schedule runner with add module"""
def _check_correct_matmul(
args_before: List[np.ndarray],
args_after: List[np.ndarray],
) -> None:
a_before, b_before, c_before = args_before
a_after, b_after, c_after = args_after
c_before = np.matmul(a_before, b_before)
assert (a_before == a_after).all()
assert (b_before == b_after).all()
tvm.testing.assert_allclose(c_before, c_after, rtol=1e-5)
def test_alloc_argument(
session: RPCSession,
device: Device,
args_info: Any,
alloc_repeat: int,
) -> List[Any]:
global repeated_args_before
repeated_args_before = []
repeated_args = rpc_default_alloc_argument(session, device, args_info, alloc_repeat)
for args in repeated_args:
repeated_args_before.append([arg.numpy() for arg in args])
return repeated_args
def test_run_evaluator(
session: RPCSession,
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[Any],
) -> List[float]:
global repeated_args_before
repeated_args_after = []
evaluator = rt_mod.time_evaluator(
func_name=rt_mod.entry_name,
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
device.sync()
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
repeated_args_after.append([arg.numpy() for arg in args])
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
for args_before, args_after in zip(
repeated_args_before,
repeated_args_after,
):
_check_correct_matmul(args_before, args_after)
del repeated_args_before
return costs
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
f_alloc_argument=test_alloc_argument,
f_run_evaluator=test_run_evaluator,
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_runner_add_test():
"""Test meta schedule runner w |
ith add module"""
def _check_correct_add(args_before: List[np.ndarray], args_after: List[np.ndarray]) -> None:
a_before, b_before, c_before = args_before
a_after, b_after, c_after = args_after
c_before = a_before + b_before
assert (a_before == a_after).all()
assert (b_before == b_after).all()
assert (c_before == c_after).all()
def test_alloc_argument(
session: RPCSession,
device: Device,
args_info: Any,
alloc_repeat: int,
) -> List[Any]:
global repeated_args_before
repeated_args_before = []
repeated_args = rpc_default_alloc_argument(
session,
device,
args_info,
alloc_repeat,
)
for args in repeated_args:
repeated_args_before.append([arg.numpy() for arg in args])
return repeated_args
def test_run_evaluator(
session: RPCSession,
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[Any],
) -> List[float]:
global repeated_args_before
repeated_args_after = []
evaluator = rt_mod.time_evaluator(
func_name=rt_mod.entry_name,
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
device.sync()
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
repeated_args_after.append([arg.numpy() for arg in args])
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
for args_before, args_after in zip(
repeated_args_before,
repeated_args_after,
):
_check_correct_add(args_before, args_after)
del repeated_args_before
return costs
mod = AddModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", [MATMUL_M]),
TensorInfo("float32", [MATMUL_M]),
TensorInfo("float32", [MATMUL_M]),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
f_alloc_argument=test_alloc_argument,
f_run_evaluator=test_run_evaluator,
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_local_runner_add_test():
"""Test meta schedule local runner with add module"""
def _check_correct_add(args_before: List[np.array], args_after: List[np.array]) -> None:
a_before, b_before, c_before = args_before
a_after, b_after, c_after = args_after
c_before = a_before + b_before
assert (a_before == a_after).all()
assert (b_before == b_after).all()
assert (c_before == c_after).all()
def test_alloc_argument(
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
global repeated_args_before
repeated_args_before = []
repeated_args = local_default_alloc_argument(device, args_info, alloc_repeat)
for args in repeated_args:
repeated_args_before.append([arg.asnumpy() for arg in args])
return repeated_args
def test_run_evaluator(
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[Any],
) -> List[float]:
global repeated_args_before
repeated_args_after = []
evaluator = rt_mod.time_evaluator(
func_name=rt_mod.entry_name,
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
device.sync()
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
repeated_args_after.append([arg.asnumpy() for arg in args])
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
for args_before, args_after in zip(repeated_args_before, repeated_args_after):
_check_correct_add(args_before, args_after)
del repeated_args_before
return costs
mod = AddModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", [MATMUL_M]),
TensorInfo("float32", [MATMUL_M]),
TensorInfo("float32", [MATMUL_M]),
],
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(
timeout_sec=100,
evaluator_config=evaluator_config,
f_alloc_argument=test_alloc_argument,
f_run_evaluator=test_run_evaluator,
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
if __name__ == "__main__":
tvm.testing.main()


from tvm import meta_schedule as ms
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
from tvm.te import create_prim_func
def test_cpu_matmul():
@T.prim_func
def cpu_matmul_0(
A: T.Buffer[(4, 512), "float32"],
B: T.Buffer[(512, 4), "float32"],
C: T.Buffer[(4, 4), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2 in T.grid(4, 4, 512):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(A[i, k], B[k, j])
T.writes(C[i, j])
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
@T.prim_func
def cpu_matmul_1(
A: T.Buffer[(4, 512), "float32"],
B: T.Buffer[(512, 4), "float32"],
C: T.Buffer[(4, 4), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_rf = T.alloc_buffer([4, 4, 128], dtype="float32")
for i0, i1, i2_0, i2_1 in T.grid(4, 4, 4, 128):
with T.block("C_rf"):
vi2_1, i, j, vi2_0 = T.axis.remap("SSSR", [i2_1, i0, i1, i2_0])
T.reads(A[i, vi2_0 * 128 + vi2_1], B[vi2_0 * 128 + vi2_1, j])
T.writes(C_rf[i, j, vi2_1])
with T.init():
C_rf[i, j, vi2_1] = T.float32(0)
C_rf[i, j, vi2_1] = (
C_rf[i, j, vi2_1] + A[i, vi2_0 * 128 + vi2_1] * B[vi2_0 * 128 + vi2_1, j]
)
for i0, i1, i2_1 in T.grid(4, 4, 128):
with T.block("C"):
vi2_1, i, j = T.axis.remap("RSS", [i2_1, i0, i1])
T.reads(C_rf[i, j, vi2_1])
T.writes(C[i, j])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + C_rf[i, j, vi2_1]
@T.prim_func
def cpu_matmul_2(
A: T.Buffer[(4, 512), "float32"],
B: T.Buffer[(512, 4), "float32"],
C: T.Buffer[(4, 4), "flo |
at32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_rf = T.alloc_buffer([4, 4, 4], dtype="float32")
for i0, i1, i2_0, i2_1 in T.grid(4, 4, 4, 128):
with T.block("C_rf"):
vi2_0, i, j, vi2_1 = T.axis.remap("SSSR", [i2_0, i0, i1, i2_1])
T.reads(A[i, vi2_0 * 128 + vi2_1], B[vi2_0 * 128 + vi2_1, j])
T.writes(C_rf[i, j, vi2_0])
with T.init():
C_rf[i, j, vi2_0] = T.float32(0)
C_rf[i, j, vi2_0] = (
C_rf[i, j, vi2_0] + A[i, vi2_0 * 128 + vi2_1] * B[vi2_0 * 128 + vi2_1, j]
)
for i0, i1, i2_0 in T.grid(4, 4, 4):
with T.block("C"):
vi2_0, i, j = T.axis.remap("RSS", [i2_0, i0, i1])
T.reads(C_rf[i, j, vi2_0])
T.writes(C[i, j])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + C_rf[i, j, vi2_0]
decision_0 = []
decision_1 = [
("SamplePerfectTile", [4, 128]),
]
decision_2 = [
("SamplePerfectTile", [4, 128]),
]
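# AddRFactor is expected to produce three sketches: the unmodified matmul
# (cpu_matmul_0, no sampling decisions) and two rfactor variants that split the
# 512-long reduction into 4 x 128 and factor on either the inner (cpu_matmul_1)
# or the outer (cpu_matmul_2) loop.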
mod = create_prim_func(te_workload.matmul(n=4, m=4, k=512))
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target("llvm --num-cores=32"),
types=ms.schedule_rule.AddRFactor,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[cpu_matmul_0, cpu_matmul_1, cpu_matmul_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_argmax():
@T.prim_func
def argmax(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1 in T.grid(128, 128):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_0(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[128, "int32"],
argmax_v1: T.Buffer[128, "float32"],
) -> None:
for i0, i1 in T.grid(128, 128):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_1(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[128, "int32"],
argmax_v1: T.Buffer[128, "float32"],
) -> None:
argmax_v0_rf = T.alloc_buffer([128, 16], dtype="int32")
argmax_v1_rf = T.alloc_buffer([128, 16], dtype="float32")
for i0, i1_0, i1_1 in T.grid(128, 8, 16):
with T.block("argmax_rf"):
vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(idx[i, vi1_0 * 16 + vi1_1], val[i, vi1_0 * 16 + vi1_1])
T.writes(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
with T.init():
argmax_v0_rf[i, vi1_1] = -1
argmax_v1_rf[i, vi1_1] = T.float32(-3.4028234663852886e38)
v_argmax_v0_rf: T.int32 = T.Select(
argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v0_rf[i, vi1_1],
idx[i, vi1_0 * 16 + vi1_1],
)
v_argmax_v1_rf: T.float32 = T.Select(
argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v1_rf[i, vi1_1],
val[i, vi1_0 * 16 + vi1_1],
)
argmax_v0_rf[i, vi1_1] = v_argmax_v0_rf
argmax_v1_rf[i, vi1_1] = v_argmax_v1_rf
for i0, i1_1 in T.grid(128, 16):
with T.block("argmax"):
vi1_1, i = T.axis.remap("RS", [i1_1, i0])
T.reads(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
T.writes(argmax_v0[i], argmax_v1[i])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v0[i], argmax_v0_rf[i, vi1_1]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v1[i], argmax_v1_rf[i, vi1_1]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_2(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[128, "int32"],
argmax_v1: T.Buffer[128, "float32"],
) -> None:
argmax_v0_rf = T.alloc_buffer([128, 8], dtype="int32")
argmax_v1_rf = T.alloc_buffer([128, 8], dtype="float32")
for i0, i1_0, i1_1 in T.grid(128, 8, 16):
with T.block("argmax_rf"):
vi1_0, i, vi1_1 = T.axis.remap("SSR", [i1_0, i0, i1_1])
T.reads(idx[i, vi1_0 * 16 + vi1_1], val[i, vi1_0 * 16 + vi1_1])
T.writes(argmax_v0_rf[i, vi1_0], argmax_v1_rf[i, vi1_0])
with T.init():
argmax_v0_rf[i, vi1_0] = -1
argmax_v1_rf[i, vi1_0] = T.float32(-3.4028234663852886e38)
v_argmax_v0_rf: T.int32 = T.Select(
argmax_v1_rf[i, vi1_0] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v0_rf[i, vi1_0],
idx[i, vi1_0 * 16 + vi1_1],
)
v_argmax_v1_rf: T.float32 = T.Select(
argmax_v1_rf[i, vi1_0] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v1_rf[i, vi1_0],
val[i, vi1_0 * 16 + vi1_1],
)
argmax_v0_rf[i, vi1_0] = v_argmax_v0_rf
argmax_v1_rf[i, vi1_0] = v_argmax_v1_rf
for i0, i1_0 in T.grid(128, 8):
with T.block("argmax"):
vi1_0, i = T.axis.remap("RS", [i1_0, i0])
T.reads(argmax_v0_rf[i, vi1_0], argmax_v1_rf[i, vi1_0])
T.writes(argmax_v0[i], argmax_v1[i])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_0], argmax_v0[i], argmax_v0_rf[i, vi1_0]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_0], argmax_v1[i], argmax_v1_rf[i, vi1_0]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
decision_0 = []
decision_1 = [
("SamplePerfectTile", [8, 16]),
]
decision_2 = [
("SamplePerfectTile", [8, 16]),
]
mod = argmax
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target("llvm --num-cores=32"),
types=ms.schedule_rule.AddRFactor,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[argmax_0, argmax_1, argmax_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
if __name__ == "__main__":
test_cpu_matmul()
test_cpu_argmax()


from typing import List
import tempfile
import pytest
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.schedule_rule import ApplyCustomRule
from tvm.script import tir as T
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
T.block_attr({"schedule_rule": "test_apply_custom_rule"})
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.register_func("meta_schedule.cpu.test_apply_custom_rule")
def sch_fn(sch: tvm.tir.Schedule, block: tvm.tir.Block) -> List[tvm.tir.Schedule]:
raise ValueError("Intended for meta_schedule.cpu.test_apply_custom_rule")
def test_custom_rule():
with pytest.raises(ValueError) as e_info:
with tempfile.TemporaryDirectory() as tmpdir:
sch_rules = [ApplyCustomRule()]
space_gen = ms.space_generator.PostOrderApply(sch_rules=sch_rules)
ms.tune_tir(
mod=Matmul,
target="llvm -num-cores=1",
work_dir=tmpdir,
max_trials_global=10,
space=space_gen,
)
assert "ValueError: Intended for meta_schedule.cpu.test_apply_custom_rule" in str(e_info.value)
if __name__ == "__main__":
    test_custom_rule()
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
    check_sketches,
    generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
@T.prim_func
def element_wise(var_A: T.handle, var_B: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
for i, j in T.grid(512, 512):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def reduction_loop_only(
A: T.Buffer[2, "float32"],
B: T.Buffer[2, "float32"],
C: T.Buffer[(), "float32"],
) -> None:
for i0 in T.serial(2):
with T.block("C"):
k0 = T.axis.reduce(2, i0)
T.reads(A[k0], B[k0])
T.writes(C[()])
with T.init():
C[()] = T.float32(1.0)
C[()] = T.min(C[()], A[k0] / B[k0])
@T.prim_func
def zero_dim_add(
A: T.Buffer[(), "float32"],
B: T.Buffer[(), "float32"],
C: T.Buffer[(), "float32"],
) -> None:
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
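# The tests below generate design spaces with ms.schedule_rule.AutoBind on a CUDA target and
# compare them against the expected sketches: spatial loops are fused and bound to blockIdx.x /
# threadIdx.x, including the reduction-only and zero-dimensional corner cases.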
def test_cuda_element_wise():
@T.prim_func
def elementwise_0(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
) -> None:
for i_j_fused_0 in T.thread_binding(256, thread="blockIdx.x"):
for i_j_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
with T.block("C"):
                    vi = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) // 512)
vj = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) % 512)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] + T.float32(1)
decision_0 = [
("SampleCategorical", 5),
]
mod = element_wise
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080", host="llvm"),
types=ms.schedule_rule.AutoBind,
)
check_sketches(
mod,
sketches=actual,
        expected_mods=[elementwise_0],
expected_decisions=[decision_0],
)
def test_cuda_reduction_loop_only():
@T.prim_func
def reduction_loop_only_0(
A: T.Buffer[2, "float32"],
B: T.Buffer[2, "float32"],
C: T.Buffer[(), "float32"],
) -> None:
for u_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for u_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
for i0 in T.serial(2):
with T.block("C"):
k0 = T.axis.reduce(2, i0)
T.reads(A[k0], B[k0])
T.writes(C[()])
with T.init():
C[()] = T.float32(1)
C[()] = T.min(C[()], A[k0] / B[k0])
mod = reduction_loop_only
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080", host="llvm"),
types=ms.schedule_rule.AutoBind,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[reduction_loop_only_0],
expected_decisions=[[]],
)
def test_cuda_zero_dim_add():
@T.prim_func
def zero_dim_add_0(
A: T.Buffer[(), "float32"],
B: T.Buffer[(), "float32"],
C: T.Buffer[(), "float32"],
) -> None:
for u_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for u_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
with T.block("C"):
vi = T.axis.spatial(1, 0)
T.reads(A[()], B[()])
T.writes(C[()])
C[()] = A[()] + B[()]
mod = zero_dim_add
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080", host="llvm"),
types=ms.schedule_rule.AutoBind,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[zero_dim_add_0],
expected_decisions=[[]],
)
if __name__ == "__main__":
    test_cuda_element_wise()
    test_cuda_reduction_loop_only()
    test_cuda_zero_dim_add()
import pytest
import tvm
from tvm.tir import Schedule
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import generate_design_space
from tvm.script import tir as T
from tvm.target import Target
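# Conv2DBiasBnReLU: an unscheduled conv2d + bias + batch-norm + ReLU workload, with each stage
# written as its own block (pad_temp, compute, bias_add, bn_mul, bn_add, compute_1).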
@tvm.script.ir_module
class Conv2DBiasBnReLU:
@T.prim_func
def main(var_X: T.handle, var_W: T.handle, var_B: T.handle, var_bn_scale: T.handle, var_bn_offset: T.handle, var_compute: T.handle) -> None:
X = T.match_buffer(var_X, [1, 512, 56, 56], dtype="float32")
W = T.match_buffer(var_W, [512, 512, 3, 3], dtype="float32")
B = T.match_buffer(var_B, [512, 1, 1], dtype="float32")
bn_scale = T.match_buffer(var_bn_scale, [512, 1, 1], dtype="float32")
bn_offset = T.match_buffer(var_bn_offset, [512, 1, 1], dtype="float32")
compute = T.match_buffer(var_compute, [1, 512, 56, 56], dtype="float32")
pad_temp = T.alloc_buffer([1, 512, 58, 58], dtype="float32")
compute_1 = T.alloc_buffer([1, 512, 56, 56], dtype="float32")
bias_add = T.alloc_buffer([1, 512, 56, 56], dtype="float32")
bn_mul = T.alloc_buffer([1, 512, 56, 56], dtype="float32")
bn_add = T.alloc_buffer([1, 512, 56, 56], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 512, 58, 58):
with T.block("pad_temp"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(i2_1 >= 1 and i2_1 < 57 and i3_1 >= 1 and i3_1 < 57, X[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32")
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 512, 56, 56, 512, 3, 3):
with T.block("compute"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
with T.init():
compute_1[nn, ff, yy, xx] = T.float32(0)
compute_1[nn, ff, yy, xx] = compute_1[nn, ff, yy, xx] + pad_temp[nn, rc, yy + ry, xx + rx] * W[ff, rc, ry, rx]
for i0, i1, i2, i3 in T.grid(1, 512, 56, 56):
with T.block("bias_add"):
i, j, k, l = T.axis.remap("SSSS", [i0, i1, i2, i3])
bias_add[i, j, k, l] = compute_1[i, j, k, l] + B[j, 0, 0]
        for i0, i1, i2, i3 in T.grid(1, 512, 56, 56):
with T.block("bn_mul"):
i, j, k, l = T.axis.remap("SSSS", [i0, i1, i2, i3])
bn_mul[i, j, k, l] = bias_add[i, j, k, l] * bn_scale[j, 0, 0]
for i0, i1, i2, i3 in T.grid(1, 512, 56, 56):
with T.block("bn_add"):
i, j, k, l = T.axis.remap("SSSS", [i0, i1, i2, i3])
bn_add[i, j, k, l] = bn_mul[i, j, k, l] + bn_offset[j, 0, 0]
for i0, i1, i2, i3 in T.grid(1, 512, 56, 56):
with T.block("compute_1"):
i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
compute[i0_2, i1_2, i2_2, i3_2] = T.max(bn_add[i0_2, i1_2, i2_2, i3_2], T.float32(0))
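# Conv2DBiasBnReLUInlined: the same workload after the bias_add, bn_mul, and bn_add blocks have
# been folded into the final ReLU block, leaving only pad_temp, the convolution, and one fused
# epilogue block.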
@tvm.script.ir_module
class Conv2DBiasBnReLUInlined:
@T.prim_func
def main(var_X: T.handle, var_W: T.handle, var_B: T.handle, var_bn_scale: T.handle, var_bn_offset: T.handle, var_compute: T.handle) -> None:
X = T.match_buffer(var_X, [1, 512, 56, 56], dtype="float32")
W = T.match_buffer(var_W, [512, 512, 3, 3], dtype="float32")
B = T.match_buffer(var_B, [512, 1, 1], dtype="float32")
bn_scale = T.match_buffer(var_bn_scale, [512, 1, 1], dtype="float32")
bn_offset = T.match_buffer(var_bn_offset, [512, 1, 1], dtype="float32")
compute = T.match_buffer(var_compute, [1, 512, 56, 56], dtype="float32")
pad_temp = T.alloc_buffer([1, 512, 58, 58], dtype="float32")
compute_1 = T.alloc_buffer([1, 512, 56, 56], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 512, 58, 58):
with T.block("pad_temp"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(i2_1 >= 1 and i2_1 < 57 and i3_1 >= 1 and i3_1 < 57, X[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32")
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 512, 56, 56, 512, 3, 3):
with T.block("compute"):
                nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
with T.init():
compute_1[nn, ff, yy, xx] = T.float32(0)
compute_1[nn, ff, yy, xx] = compute_1[nn, ff, yy, xx] + pad_temp[nn, rc, yy + ry, xx + rx] * W[ff, rc, ry, rx]
for i0, i1, i2, i3 in T.grid(1, 512, 56, 56):
with T.block("compute_1"):
i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
compute[i0_2, i1_2, i2_2, i3_2] = T.max((compute_1[i0_2, i1_2, i2_2, i3_2] + B[i1_2, 0, 0]) * bn_scale[i1_2, 0, 0] + bn_offset[i1_2, 0, 0], T.float32(0))
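# MultiLevelTiledConv2D: the convolution after multi-level tiling for CUDA, with blockIdx.x /
# vthread.x / threadIdx.x bindings, cooperative fetching of pad_temp and W into shared memory,
# and a local accumulation buffer.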
@tvm.script.ir_module
class MultiLevelTiledConv2D:
@T.prim_func
def main(var_X: T.handle, var_W: T.handle, var_B: T.handle, var_bn_scale: T.handle, var_bn_offset: T.handle, var_compute: T.handle) -> None:
X = T.match_buffer(var_X, [1, 512, 56, 56], dtype="float32")
W = T.match_buffer(var_W, [512, 512, 3, 3], dtype="float32")
B = T.match_buffer(var_B, [512, 1, 1], dtype="float32")
bn_scale = T.match_buffer(var_bn_scale, [512, 1, 1], dtype="float32")
bn_offset = T.match_buffer(var_bn_offset, [512, 1, 1], dtype="float32")
compute = T.match_buffer(var_compute, [1, 512, 56, 56], dtype="float32")
pad_temp = T.alloc_buffer([1, 512, 58, 58], dtype="float32")
compute_1 = T.alloc_buffer([1, 512, 56, 56], dtype="float32")
compute_local = T.alloc_buffer([1, 512, 56, 56], dtype="float32", scope="local")
pad_temp_shared = T.alloc_buffer([1, 512, 58, 58], dtype="float32", scope="shared")
W_shared = T.alloc_buffer([512, 512, 3, 3], dtype="float32", scope="shared")
for i0, i1, i2, i3 in T.grid(1, 512, 58, 58):
with T.block("pad_temp"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(i2_1 >= 1 and i2_1 < 57 and i3_1 >= 1 and i3_1 < 57, X[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32")
        for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(0, 224, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(0, 2, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"):
for i4_0, i5_0, i6_0 in T.grid(1, 3, 1):
for ax0_ax1_ax2_ax3_fused_0 in T.serial(0, 40960, annotations={"meta_schedule.cooperative_fetch":1}):
for ax0_ax1_ax2_ax3_fused_1 in T.vectorized(0, 3):
with T.block("pad_temp_shared"):
v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(512, (ax0_ax1_ax2_ax3_fused_0 * 3 + ax0_ax1_ax2_ax3_fused_1) // 240)
                                    v2 = T.axis.spatial(58, i0_0_i1_0_i2_0_i3_0_fused % 14 // 2 * 8 + i5_0 + (ax0_ax1_ax2_ax3_fused_0 * 3 + ax0_ax1_ax2_ax3_fused_1) % 240 // 30)
v3 = T.axis.spatial(58, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + (ax0_ax1_ax2_ax3_fused_0 * 3 + ax0_ax1_ax2_ax3_fused_1) % 30)
pad_temp_shared[v0, v1, v2, v3] = pad_temp[v0, v1, v2, v3]
for ax0_ax1_ax2_ax3_fused_0 in T.serial(0, 12288, annotations={"meta_schedule.cooperative_fetch":1}):
for ax0_ax1_ax2_ax3_fused_1 in T.vectorized(0, 4):
with T.block("W_shared"):
                                    v0 = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused // 14 * 32 + (ax0_ax1_ax2_ax3_fused_0 * 4 + ax0_ax1_ax2_ax3_fused_1) // 1536)
                                    v1 = T.axis.spatial(512, (ax0_ax1_ax2_ax3_fused_0 * 4 + ax0_ax1_ax2_ax3_fused_1) % 1536 // 3)
v2 = T.axis.spatial(3, i5_0)
v3 = T.axis.spatial(3, (ax0_ax1_ax2_ax3_fused_0 * 4 + ax0_ax1_ax2_ax3_fused_1) % 3)
W_shared[v0, v1, v2, v3] = W[v0, v1, v2, v3]
for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(32, 1, 1, 1, 1, 1, 1, 16, 1, 3, 1, 8, 2, 28):
                            with T.b