text
stringlengths 1
2.05k
|
---|
2):
with T.block("A_reindex_shared"):
v0 = T.axis.spatial(128, ax0_ax1_fused
v1 = T.axis.spatial(128, ax2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(A[v0, v1])
T.writes(A_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":1})
A_reindex_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused in T.serial(8192):
with T.block("B_reindex_shared"):
v0 = T.axis.spatial(128, ax2_0_0 * 64 + ax0_ax1_fused
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(B[v0, v1])
T.writes(B_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":1})
B_reindex_shared[v0, v1] = B[v0, v1]
for ax2_0_1 in T.serial(2):
for ax0_0, ax1_0 in T.grid(1, 2):
with T.block("A_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused
v1_o = T.axis.spatial(8, ax2_0_0 * 4 + ax2_0_1 * 2 + ax1_0)
T.reads(A_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("A_reindex_shared_wmma.matrix_a"): |
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0, ax1_0 in T.grid(2, 4):
with T.block("B_reindex_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(8, ax2_0_0 * 4 + ax2_0_1 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused % 2 * 4 + ax1_0)
T.reads(B_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("B_reindex_shared_wmma.matrix_b"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 4, 2, 1, 1):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fuse |
d
v1_o = T.axis.spatial(8, ax1_0_4 + ax0_0_2_ax1_0_2_fused % 2 * 4 + ax1_0_3)
v2_o = T.axis.reduce(8, ax2_0_0 * 4 + ax2_0_1 * 2 + ax2_0_2)
T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.bl |
ock_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0_0, ax1_0 in T.grid(1, 4):
with T.block("C_reindex_wmma.accumulator_o"):
v0_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused
v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused % 2 * 4 + ax1_0)
T.reads(C_reindex_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_global"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_reindex_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(C_reindex_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(C[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
C[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for i0, i1 in T.grid(128, 128):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(C[i0_1, i1_1])
T.writes(compute[i0_1, i1_1])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [1, 1, 8, 1, 1]),
("SamplePerfectTile", [1, 1, 2, 4, 1]),
("SamplePerfectTile", [2, 2, 2]),
("SampleCa |
tegorical", 0),
("SampleCategorical", 0),
]
mod = te.create_prim_func(
te_workload.matmul_relu(
n=128,
m=128,
k=128,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="global")]
+ get_rules("cuda", ms.schedule_rule.AutoInline),
)
check_sketches(
mod,
sketches=actual,
expected_mods=[matmul_relu_global_0],
expected_decisions=[decision_0],
)
def test_matmul_relu_non_tensorizable():
mod = te.create_prim_func(
te_workload.matmul_relu(
n=128,
m=128,
k=128,
)
)
(sch,) = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="global")]
+ get_rules("cuda", ms.schedule_rule.AutoInline),
)
tvm.ir.assert_structural_equal(mod, sch.mod["main"])
def test_padded_matmul_relu():
@T.prim_func
def padded_matmul_relu_0(A: T.Buffer[(127, 127), "float16"], B: T.Buffer[(127, 127), "float16"], compute: T.Buffer[(127, 127), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
A_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
B_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
A_reindex_shared_wmma_matrix_a = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_a")
B_reindex_shared_wmma_matrix_b = T.alloc_buf |
fer([128, 128], dtype="float16", scope="wmma.matrix_b")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(8, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(2, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(2, thread="threadIdx.y"):
for ax2_0_0 in T.serial(1):
for ax0_ax1_fused in T.serial(4096):
with T.block("A_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(A[v0, v1])
T.writes(A_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":8})
A_reindex_shared[v0, v1] = T.if_then_else(v0 < 127 and v1 < 127, A[v0, v1], T.float16(0), dtype="float16")
for ax0_ax1_fused in T.serial(4096):
with T.block("B_reindex_shared"):
v0 = T.axis.spatial(128, ax0_ax1_fused
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused % 2 * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused % 32)
T.reads(B[v0, v1])
T.writes(B_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":1})
B_reindex_shared[v0, v1] = T.if_then_else(v0 < 127 and v1 < 127, B[v0, v1], T.float16(0), dtype="float16")
for ax2_0_1 in T.serial(4):
for ax0_0, ax1_0 in T.grid(2, 2):
with T.block("A_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused
v1_o = T. |
axis.spatial(8, ax2_0_1 * 2 + ax1_0)
T.reads(A_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("A_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0, ax1_0 in T.grid(2, 1):
with T.block("B_reindex_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(8, ax2_0_1 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused)
T.reads(B_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("B_reindex_shared_wmma.matrix_b"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) |
T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 2, 2, 1):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused
v1_o = T.axis.spatial(8, ax1_0_4 + ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused + ax1_0_3)
v2_o = T.axis.reduce(8, ax2_0_0 * 8 + ax2_0_1 * 2 + ax2_0_2)
T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] |
= T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0_0, ax1_0 in T.grid(2, 1):
with T.block("C_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused
v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused)
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) |
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_ax1_fused in T.serial(1024):
with T.block("C_reindex_shared"):
T.where(ax0_0_0_ax1_0_0_fused
v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused % 2 * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused % 32)
T.reads(C_reindex_shared[v0, v1])
T.writes(compute[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch":4})
compute[v0, v1] = T.max(C_reindex_shared[v0, v1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [4, 1, 1, 1, 2]),
("SamplePerfectTile", [2, 2, 2, 1, 1]),
("SamplePerfectTile", [1, 4, 2]),
("SampleCategorical", 3),
("SampleCategorical", 3),
("SampleCategorical", 0),
]
mod = te.create_prim_func(
te_workload.matmul_relu(
n=127,
m=127,
k=127,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="shared")]
+ get_rules("cuda", ms.schedule_rule.AutoInline),
)
check_sketches(
mod,
sketches=actual,
expected_mods=[padded_matmul_relu_0],
expected_decisions=[decision_0],
)
def test_conv_1x1():
@T.prim_func
def conv2d_1x1_0(inputs: T.Buffer[(1, 16, 16, 64), "float16"], weight: T.Buffer[(1, 1, 64, 64), "fl |
oat16"], conv2d_nhwc: T.Buffer[(1, 16, 16, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
conv2d_nhwc_reindex_shared = T.alloc_buffer([256, 64], dtype="float32", scope="shared")
conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([256, 64], dtype="float32", scope="wmma.accumulator")
PadInput_reindex_shared = T.alloc_buffer([256, 64], dtype="float16", scope="shared")
weight_reindex_shared = T.alloc_buffer([1, 1, 64, 64], dtype="float16", scope="shared")
PadInput_reindex_shared_wmma_matrix_a = T.alloc_buffer([256, 64], dtype="float16", scope="wmma.matrix_a")
weight_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 64, 64], dtype="float16", scope="wmma.matrix_b")
for ax2_0_0_ax3_0_0_fused in T.thread_binding(16, thread="blockIdx.y"):
for ax2_0_1_ax3_0_1_fused in T.thread_binding(2, thread="blockIdx.x"):
for ax2_0_2_ax3_0_2_fused in T.thread_binding(2, thread="threadIdx.y"):
for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 1):
for ax0_ax1_fused in T.serial(1024):
with T.block("PadInput_reindex_shared"):
v0 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused
v1 = T.axis.spatial(64, ax0_ax1_fused % 64)
T.reads(inputs[v0
T.writes(PadInput_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":1})
PadInput_reindex_shared[v0, v1] = inputs[v0
for ax0_ax1_ax2_ax3_fused in T.serial(2048):
with T.block("weight_reindex_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused |
v3 = T.axis.spatial(64, ax2_0_0_ax3_0_0_fused % 2 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(weight[v0, v1, v2, v3])
T.writes(weight_reindex_shared[v0, v1, v2, v3])
T.block_attr({"buffer_dim_align":[[0, 2, 32, 8]], "meta_schedule.cooperative_fetch":4})
weight_reindex_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 1):
for ax0_0_1, ax1_0_1 in T.grid(1, 4):
with T.block("PadInput_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused
v1_o = T.axis.spatial(4, ax1_0_1)
T.reads(PadInput_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"})
for ax0_1_1, ax1_1_1 in T.grid(16, 16):
with T.block("PadInput_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1])
T.reads(PadInput_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = PadInput_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 4, 1):
with T.block("weight_reindex_shared |
_wmma.matrix_b_o"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2_o = T.axis.spatial(4, ax2_0)
v3_o = T.axis.spatial(4, ax2_0_0_ax3_0_0_fused % 2 * 2 + ax2_0_2_ax3_0_2_fused)
T.reads(weight_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.writes(weight_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("weight_reindex_shared_wmma.matrix_b"):
v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads(weight_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.writes(weight_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
weight_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = weight_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]
for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 4, 1, 1):
with T.block("conv2d_nhwc_o"):
v0 = T.axis.reduce(1, 0)
v1 = T.axis.reduce(1, 0)
v2_o = T.axis.spatial(16, ax2_0_4 + ax2_0_0_ax3_0_0_fused
v3_o = T.axis.spatial(4, ax3_0_4 + ax2_0_0_ax3_0_0_fused % 2 * 2 + ax2_0_2_ax3_0_2_fused + ax3_0_3)
v4_o = T.axis.reduce(4, ax4_0_0 * 4 + ax4_0_1 * 4 + ax4_0 |
_2)
T.reads(PadInput_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], weight_reindex_shared_wmma_matrix_b[v0, v1, v4_o * 16 : v4_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_init"):
v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads()
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init])
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = T.float32(0)
for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16):
with T.block("conv2d_nhwc"):
v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], PadInput_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], weight_reindex_shared_wmma_matrix_b[v0, v1, v4_o * 16 + v4_i, v3_o * 16 + v3_i])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"}) |
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] + T.cast(PadInput_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], "float32") * T.cast(weight_reindex_shared_wmma_matrix_b[v0, v1, v4_o * 16 + v4_i, v3_o * 16 + v3_i], "float32")
for ax0_0, ax1_0 in T.grid(1, 1):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused
v1_o = T.axis.spatial(4, ax2_0_0_ax3_0_0_fused % 2 * 2 + ax2_0_2_ax3_0_2_fused)
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_ax1_fused in T.serial(512):
with T.block("conv2d_nhwc_reindex_shared"):
v0 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused
v1 = T.axis.spatial(64, ax2_0_0_ax3_0_0_fused % 2 * 32 + ax0_ax1_fused % 32) |
T.reads(conv2d_nhwc_reindex_shared[v0, v1])
T.writes(conv2d_nhwc[v0
T.block_attr({"meta_schedule.cooperative_fetch":2})
conv2d_nhwc[v0
decision_0 = [
("SamplePerfectTile", [1, 1, 1]),
("SamplePerfectTile", [1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1, 1]),
("SamplePerfectTile", [2, 1, 2, 1, 1]),
("SamplePerfectTile", [1, 1, 4]),
("SampleCategorical", 1),
("SampleCategorical", 0),
("SampleCategorical", 2),
]
mod = te.create_prim_func(
te_workload.conv2d_nhwc(
1,
16,
16,
64,
64,
1,
1,
0,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="shared")]
+ get_rules("cuda", ms.schedule_rule.AutoInline),
)
check_sketches(
mod,
sketches=actual,
expected_mods=[conv2d_1x1_0],
expected_decisions=[decision_0],
)
if __name__ == "__main__":
tvm.testing.main() |
import tvm
from tvm |
import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation |
import (
check_sketches,
generate_design_space,
)
from tvm.script |
import tir as T
from tvm.target |
import Target
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class ParallelizeVectorizeUnroll:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
with T.block("root"):
T.reads([])
T.writes([])
T.block_attr({"meta_schedule.parallel": 128, "meta_schedule.vectorize": 16, "meta_schedule.unroll_explicit": 2})
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class PureSpatial:
@T.prim_func
def main(placeholder: T.Buffer[(1, 13, 13, 3, 85), "float32"], placeholder_1: T.Buffer[(1, 26, 26, 3, 85), "float32"], placeholder_2: T.Buffer[(1, 52, 52, 3, 85), "float32"], T_expand_dims: T.Buffer[(1, 80, 10647), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
T_strided_slice_with_axes = T.alloc_buffer([1, 52, 52, 3, 1], dtype="float32")
T_sigmoid = T.alloc_buffer([1, 52, 52, 3, 1], dtype="float32")
T_strided_slice_with_axes_1 = T.alloc_buffer( |
[1, 52, 52, 3, 80], dtype="float32")
T_sigmoid_1 = T.alloc_buffer([1, 52, 52, 3, 80], dtype="float32")
T_multiply = T.alloc_buffer([1, 52, 52, 3, 80], dtype="float32")
T_reshape = T.alloc_buffer([8112, 80], dtype="float32")
T_strided_slice_with_axes_2 = T.alloc_buffer([1, 26, 26, 3, 1], dtype="float32")
T_sigmoid_2 = T.alloc_buffer([1, 26, 26, 3, 1], dtype="float32")
T_strided_slice_with_axes_3 = T.alloc_buffer([1, 26, 26, 3, 80], dtype="float32")
T_sigmoid_3 = T.alloc_buffer([1, 26, 26, 3, 80], dtype="float32")
T_multiply_1 = T.alloc_buffer([1, 26, 26, 3, 80], dtype="float32")
T_reshape_1 = T.alloc_buffer([2028, 80], dtype="float32")
T_strided_slice_with_axes_4 = T.alloc_buffer([1, 13, 13, 3, 1], dtype="float32")
T_sigmoid_4 = T.alloc_buffer([1, 13, 13, 3, 1], dtype="float32")
T_strided_slice_with_axes_5 = T.alloc_buffer([1, 13, 13, 3, 80], dtype="float32")
T_sigmoid_5 = T.alloc_buffer([1, 13, 13, 3, 80], dtype="float32")
T_multiply_2 = T.alloc_buffer([1, 13, 13, 3, 80], dtype="float32")
T_reshape_2 = T.alloc_buffer([507, 80], dtype="float32")
T_concat = T.alloc_buffer([10647, 80], dtype="float32")
T_transpose = T.alloc_buffer([80, 10647], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 1):
with T.block("T_strided_slice_with_axes"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)])
T.writes(T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4])
T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4] = placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)]
for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 1):
with T.block("T_sigmoid"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_strided_slice_wit |
h_axes[ax0, ax1, ax2, ax3, ax4])
T.writes(T_sigmoid[ax0, ax1, ax2, ax3, ax4])
T_sigmoid[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 80):
with T.block("T_strided_slice_with_axes_1"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)])
T.writes(T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4])
T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4] = placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)]
for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 80):
with T.block("T_sigmoid_1"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4])
T.writes(T_sigmoid_1[ax0, ax1, ax2, ax3, ax4])
T_sigmoid_1[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 80):
with T.block("T_multiply"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_sigmoid[ax0, ax1, ax2, ax3, 0], T_sigmoid_1[ax0, ax1, ax2, ax3, ax4])
T.writes(T_multiply[ax0, ax1, ax2, ax3, ax4])
T_multiply[ax0, ax1, ax2, ax3, ax4] = T_sigmoid[ax0, ax1, ax2, ax3, 0] * T_sigmoid_1[ax0, ax1, ax2, ax3, ax4]
for i0, i1 in T.grid(8112, 80):
with T.block("T_reshape"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_multiply[0, (ax1
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = T_multiply[0, (ax1
for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 1):
with T.block("T_st |
rided_slice_with_axes_2"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)])
T.writes(T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4])
T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4] = placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)]
for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 1):
with T.block("T_sigmoid_2"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4])
T.writes(T_sigmoid_2[ax0, ax1, ax2, ax3, ax4])
T_sigmoid_2[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 80):
with T.block("T_strided_slice_with_axes_3"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)])
T.writes(T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4])
T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4] = placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)]
for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 80):
with T.block("T_sigmoid_3"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4])
T.writes(T_sigmoid_3[ax0, ax1, ax2, ax3, ax4])
T_sigmoid_3[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 80):
with T.block("T_multiply_1"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, |
i3, i4])
T.reads(T_sigmoid_2[ax0, ax1, ax2, ax3, 0], T_sigmoid_3[ax0, ax1, ax2, ax3, ax4])
T.writes(T_multiply_1[ax0, ax1, ax2, ax3, ax4])
T_multiply_1[ax0, ax1, ax2, ax3, ax4] = T_sigmoid_2[ax0, ax1, ax2, ax3, 0] * T_sigmoid_3[ax0, ax1, ax2, ax3, ax4]
for i0, i1 in T.grid(2028, 80):
with T.block("T_reshape_1"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_multiply_1[0, (ax1
T.writes(T_reshape_1[ax0, ax1])
T_reshape_1[ax0, ax1] = T_multiply_1[0, (ax1
for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 1):
with T.block("T_strided_slice_with_axes_4"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)])
T.writes(T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4])
T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4] = placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)]
for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 1):
with T.block("T_sigmoid_4"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4])
T.writes(T_sigmoid_4[ax0, ax1, ax2, ax3, ax4])
T_sigmoid_4[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 80):
with T.block("T_strided_slice_with_axes_5"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)])
T.writes(T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4])
T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4] = placeholder[ax0, ax1, ax2, ax3, T.c |
ast(ax4, "int64") + T.int64(5)]
for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 80):
with T.block("T_sigmoid_5"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4])
T.writes(T_sigmoid_5[ax0, ax1, ax2, ax3, ax4])
T_sigmoid_5[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 80):
with T.block("T_multiply_2"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(T_sigmoid_4[ax0, ax1, ax2, ax3, 0], T_sigmoid_5[ax0, ax1, ax2, ax3, ax4])
T.writes(T_multiply_2[ax0, ax1, ax2, ax3, ax4])
T_multiply_2[ax0, ax1, ax2, ax3, ax4] = T_sigmoid_4[ax0, ax1, ax2, ax3, 0] * T_sigmoid_5[ax0, ax1, ax2, ax3, ax4]
for i0, i1 in T.grid(507, 80):
with T.block("T_reshape_2"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_multiply_2[0, (ax1
T.writes(T_reshape_2[ax0, ax1])
T_reshape_2[ax0, ax1] = T_multiply_2[0, (ax1
for i0, i1 in T.grid(10647, 80):
with T.block("T_concat"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_reshape[ax0 - 2535, ax1], T_reshape_1[ax0 - 507, ax1], T_reshape_2[ax0, ax1])
T.writes(T_concat[ax0, ax1])
T_concat[ax0, ax1] = T.if_then_else(2535 <= ax0, T_reshape[ax0 - 2535, ax1], T.if_then_else(507 <= ax0, T_reshape_1[ax0 - 507, ax1], T_reshape_2[ax0, ax1], dtype="float32"), dtype="float32")
for i0, i1 in T.grid(80, 10647):
with T.block("T_transpose"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_concat[ax1, ax0])
T.writes(T_transpose[ax0, ax1])
T_transpose[ax0, ax1] = T_concat[ax1, ax0] |
for i0, i1, i2 in T.grid(1, 80, 10647):
with T.block("T_expand_dims"):
ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
T.reads(T_transpose[ax1, ax2])
T.writes(T_expand_dims[ax0, ax1, ax2])
T_expand_dims[ax0, ax1, ax2] = T_transpose[ax1, ax2]
def test_parallel_vectorize_unroll():
    """ParallelizeVectorizeUnroll on a reduction workload: expect a sketch
    whose root block carries parallel=512, vectorize=32 and
    unroll_explicit=16 annotations (SampleCategorical decision 1)."""
    # Expected sketch: the original matmul with the annotations attached.
    @T.prim_func
    def Matmul_0(
        A: T.Buffer[(1024, 1024), "float32"],
        B: T.Buffer[(1024, 1024), "float32"],
        C: T.Buffer[(1024, 1024), "float32"],
    ) -> None:
        T.func_attr({"global_symbol": "main"})
        with T.block("root"):
            T.reads()
            T.writes()
            # Annotations the ParallelizeVectorizeUnroll rule is expected to
            # attach to the root block.
            T.block_attr(
                {
                    "meta_schedule.parallel": 512,
                    "meta_schedule.unroll_explicit": 16,
                    "meta_schedule.vectorize": 32,
                }
            )
            for i, j, k in T.grid(1024, 1024, 1024):
                with T.block("matmul"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    T.reads(A[vi, vk], B[vk, vj])
                    T.writes(C[vi, vj])
                    with T.init():
                        C[vi, vj] = T.float32(0)
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
    # Expected sampling decisions recorded in the sketch's trace.
    decision_0 = [
        ("SampleCategorical", 1),
    ]
    # NOTE(review): `Matmul` is a module-level workload defined elsewhere in
    # this file (distinct from the `Matmul_0` expected sketch above) — confirm.
    mod = Matmul
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm --num-cores=32"),
        types=None,
        sch_rules=[
            ms.schedule_rule.ParallelizeVectorizeUnroll(
                max_jobs_per_core=16,
                max_vectorize_extent=32,
                unroll_max_steps=[0, 16, 64, 512],
                unroll_explicit=True,
            ),
        ],
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[Matmul_0],
        expected_decisions=[decision_0],
    )
def test_parallel_vectorize_unroll_spatial():
    """For the PureSpatial workload with unlimited (-1) parallel/vectorize
    extents, exactly one design space is produced and its simplified trace
    is empty (the rule leaves no instructions behind)."""
    mod = PureSpatial
    # Fixed: `actual =` and `generate_design_space(` were split onto separate
    # lines in the original text, which is a syntax error.
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm --num-cores=32"),
        types=None,
        sch_rules=[
            ms.schedule_rule.ParallelizeVectorizeUnroll(
                max_jobs_per_core=-1,
                max_vectorize_extent=-1,
                unroll_max_steps=[0, 16, 64, 512],
                unroll_explicit=True,
            ),
        ],
    )
    assert len(actual) == 1
    # After removing postprocessing instructions, nothing should remain.
    trace = actual[0].trace.simplified(remove_postproc=True)
    assert not trace.insts
# Allow running these tests directly as a script (outside pytest).
if __name__ == "__main__":
    test_parallel_vectorize_unroll()
    test_parallel_vectorize_unroll_spatial()
import tvm
from tvm |
import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation |
import (
check_sketches,
generate_design_space,
)
from tvm.script |
import tir as T
from tvm.target |
import Target
# A 2048^3 elementwise-add workload with an explicit staging ("move") block
# and a pre-split "add" block; used below to exercise RandomComputeLocation.
@tvm.script.ir_module
class Add:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, [2048, 2048, 2048], dtype="float32")
        B = T.match_buffer(b, [2048, 2048, 2048], dtype="float32")
        A_cached = T.alloc_buffer([2048, 2048, 2048], dtype="float32")
        # Staging block: copy A into A_cached.
        for i, j, k in T.grid(2048, 2048, 2048):
            with T.block("move"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes([A_cached[vi, vj, vk]])
                A_cached[vi, vj, vk] = A[vi, vj, vk]
        # Compute block over pre-split loops: B = A_cached + 1.
        for i0, j0, i1, j1, k0, i2, j2, k1 in T.grid(128, 64, 4, 4, 64, 4, 8, 32):
            with T.block("add"):
                vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2)
                vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + j2)
                vk = T.axis.spatial(2048, k0 * 32 + k1)
                T.reads([A_cached[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A_cached[vi, vj, vk] + T.float32(1)
def test_random_compute_location():
    """RandomComputeLocation on the Add workload: the "move" block is computed
    at loop index 5 of the pre-split "add" loops
    (decision SampleComputeLocation = 5)."""
    # Expected sketch after compute-at.
    @T.prim_func
    def add_0(
        A: T.Buffer[(2048, 2048, 2048), "float32"],
        B: T.Buffer[(2048, 2048, 2048), "float32"],
    ) -> None:
        T.func_attr({"global_symbol": "main"})
        A_cached = T.alloc_buffer([2048, 2048, 2048], dtype="float32")
        for i0, j0, i1, j1, k0, i2 in T.grid(128, 64, 4, 4, 64, 4):
            for ax0, ax1, ax2 in T.grid(1, 8, 32):
                with T.block("move"):
                    vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2 + ax0)
                    vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + ax1)
                    vk = T.axis.spatial(2048, k0 * 32 + ax2)
                    T.reads(A[vi, vj, vk])
                    T.writes(A_cached[vi, vj, vk])
                    A_cached[vi, vj, vk] = A[vi, vj, vk]
            for j2, k1 in T.grid(8, 32):
                # Fixed: `with T.block("add"):` was split mid-token across two
                # lines ("T.b" / "lock(...)") in the original text.
                with T.block("add"):
                    vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2)
                    vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + j2)
                    vk = T.axis.spatial(2048, k0 * 32 + k1)
                    T.reads(A_cached[vi, vj, vk])
                    T.writes(B[vi, vj, vk])
                    B[vi, vj, vk] = A_cached[vi, vj, vk] + T.float32(1)
    decision_0 = [
        ("SampleComputeLocation", 5),
    ]
    mod = Add
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm"),
        types=None,
        sch_rules=[ms.schedule_rule.RandomComputeLocation()],
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[add_0],
        expected_decisions=[decision_0],
    )
# Allow running this test directly as a script (outside pytest).
if __name__ == "__main__":
    test_random_compute_location()
""" Test Meta Schedule SearchStrategy """
from typing |
import List |
import pytest |
import tvm |
import tvm.testing
from tvm |
import meta_schedule as ms
from tvm.meta_schedule.utils |
import derived_object
from tvm.meta_schedule.testing.dummy_object |
import DummyMutator
from tvm.script |
import tir as T
from tvm.tir.schedule |
import Schedule, Trace
MATMUL_M = 32
# A 32x32x32 fp32 matmul workload: the tuning task for the search-strategy
# tests in this chunk.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (32, 32), "float32")
        B = T.match_buffer(b, (32, 32), "float32")
        C = T.match_buffer(c, (32, 32), "float32")
        for i, j, k in T.grid(32, 32, 32):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def _is_trace_equal(sch_1: Schedule, sch_2: Schedule, remove_decisions=True) -> bool:
    """Compare two schedules by the string form of their traces.

    When *remove_decisions* is True, fresh Trace objects are built from the
    instructions only, so sampling decisions do not affect the comparison.
    """
    if remove_decisions:
        lhs = Trace(sch_1.trace.insts, {})
        rhs = Trace(sch_2.trace.insts, {})
    else:
        lhs, rhs = sch_1.trace, sch_2.trace
    return str(lhs) == str(rhs)
def _schedule_matmul(sch: Schedule):
    """Apply the reference 4x4x2 split-and-reorder tiling to the "matmul" block."""
    matmul_block = sch.get_block("matmul")
    loop_i, loop_j, loop_k = sch.get_loops(block=matmul_block)
    i_tiles = sch.split(loop_i, sch.sample_perfect_tile(loop_i, n=4))
    j_tiles = sch.split(loop_j, sch.sample_perfect_tile(loop_j, n=4))
    k_tiles = sch.split(loop_k, sch.sample_perfect_tile(loop_k, n=2))
    # Interleave the tiles: outer i/j, mid i/j, outer k, inner i/j, inner k, tail i/j.
    sch.reorder(
        i_tiles[0],
        j_tiles[0],
        i_tiles[1],
        j_tiles[1],
        k_tiles[0],
        i_tiles[2],
        j_tiles[2],
        k_tiles[1],
        i_tiles[3],
        j_tiles[3],
    )
@pytest.mark.parametrize(
    "TestClass",
    [
        ms.search_strategy.ReplayFunc,
        ms.search_strategy.ReplayTrace,
    ],
)
def test_meta_schedule_replay_func(
    TestClass: ms.search_strategy.SearchStrategy,
):
    """Replay strategies must emit exactly `max_trials_per_task` (20)
    candidates, batched by `num_trials_per_iter` (7): batches of 7, 7, 6."""
    num_trials_per_iter = 7
    max_trials_per_task = 20
    context = ms.TuneContext(
        mod=Matmul,
        space_generator=ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul, postprocs=[]),
        search_strategy=TestClass(),
    )
    strategy = context.search_strategy
    spaces = context.space_generator.generate_design_space(context.mod)
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        # Fixed: this keyword was split mid-token across two lines
        # ("design_sp" / "aces=spaces,") in the original text.
        design_spaces=spaces,
    )
    (correct_sch,) = ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul).generate_design_space(
        Matmul
    )
    num_trials_each_iter: List[int] = []
    candidates = strategy.generate_measure_candidates()
    while candidates is not None:
        num_trials_each_iter.append(len(candidates))
        runner_results: List[ms.runner.RunnerResult] = []
        for candidate in candidates:
            _is_trace_equal(
                candidate.sch,
                correct_sch,
                remove_decisions=(isinstance(strategy, ms.search_strategy.ReplayTrace)),
            )
            # Feed back a fixed fake measurement for every candidate.
            runner_results.append(
                ms.runner.RunnerResult(
                    run_secs=[0.11, 0.41, 0.54],
                    error_msg=None,
                )
            )
        strategy.notify_runner_results(candidates, runner_results)
        candidates = strategy.generate_measure_candidates()
    strategy.post_tuning()
    assert num_trials_each_iter == [7, 7, 6]
def test_meta_schedule_evolutionary_search():
    """Evolutionary search over a small tiling space should consume all 25
    distinct candidates, with few empty batches."""

    def _schedule_matmul_small(sch: Schedule):
        # A deliberately tiny space: only split j and k in two parts each.
        block = sch.get_block("matmul")
        _, j, k = sch.get_loops(block=block)
        _, _ = sch.split(j, sch.sample_perfect_tile(j, n=2))
        _, _ = sch.split(k, sch.sample_perfect_tile(k, n=2))

    num_trials_per_iter = 10
    max_trials_per_task = 2000
    (correct_sch,) = ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul).generate_design_space(
        Matmul
    )
    context = ms.TuneContext(
        mod=Matmul,
        space_generator=ms.space_generator.ScheduleFn(
            sch_fn=_schedule_matmul_small,
            sch_rules=[],
            postprocs=[],
            mutator_probs={
                DummyMutator(): 1.0,
            },
        ),
        search_strategy=ms.search_strategy.EvolutionarySearch(
            population_size=5,
            init_measured_ratio=0.1,
            init_min_unmeasured=50,
            genetic_num_iters=3,
            genetic_mutate_prob=0.5,
            # Fixed: this keyword was split mid-token across two lines
            # ("genetic_max_fail_coun" / "t=10,") in the original text.
            genetic_max_fail_count=10,
            eps_greedy=0.9,
        ),
        target=tvm.target.Target("llvm"),
        num_threads=1,
    )
    strategy = context.search_strategy
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        design_spaces=context.space_generator.generate_design_space(context.mod),
        database=ms.database.MemoryDatabase(),
        cost_model=ms.cost_model.RandomModel(),
    )
    num_trials_each_iter: List[int] = []
    candidates = strategy.generate_measure_candidates()
    while candidates is not None:
        num_trials_each_iter.append(len(candidates))
        runner_results: List[ms.runner.RunnerResult] = []
        for candidate in candidates:
            _is_trace_equal(
                candidate.sch,
                correct_sch,
                remove_decisions=(isinstance(strategy, ms.search_strategy.ReplayTrace)),
            )
            # Feed back a fixed fake measurement for every candidate.
            runner_results.append(
                ms.runner.RunnerResult(
                    run_secs=[0.11, 0.41, 0.54],
                    error_msg=None,
                )
            )
        strategy.notify_runner_results(candidates, runner_results)
        candidates = strategy.generate_measure_candidates()
    strategy.post_tuning()
    assert sum(num_trials_each_iter) == 25
    assert num_trials_each_iter.count(0) < 5
def test_meta_schedule_evolutionary_search_early_stop():
    """With a trivial (single-point) design space, evolutionary search finds
    one candidate, then emits empty batches and stops early:
    expected batches [1, 0, 0, 0, 0]."""

    def _schedule_matmul_empty(sch: Schedule):
        # No transformation: the space contains exactly one schedule.
        return sch

    (correct_sch,) = ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul).generate_design_space(
        Matmul
    )
    num_trials_per_iter = 10
    max_trials_per_task = 100
    context = ms.TuneContext(
        mod=Matmul,
        search_strategy=ms.search_strategy.EvolutionarySearch(
            population_size=5,
            init_measured_ratio=0.1,
            init_min_unmeasured=50,
            genetic_num_iters=3,
            genetic_mutate_prob=0.5,
            genetic_max_fail_count=10,
            eps_greedy=0.9,
        ),
        # Fixed: this keyword was split mid-token across two lines
        # ("space_g" / "enerator=...") in the original text.
        space_generator=ms.space_generator.ScheduleFn(
            sch_fn=_schedule_matmul_empty,
            sch_rules=[],
            postprocs=[],
            mutator_probs={
                DummyMutator(): 1.0,
            },
        ),
        target=tvm.target.Target("llvm"),
        num_threads=1,
    )
    strategy = context.search_strategy
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        design_spaces=context.space_generator.generate_design_space(context.mod),
        database=ms.database.MemoryDatabase(),
        cost_model=ms.cost_model.RandomModel(),
    )
    num_trials_each_iter: List[int] = []
    candidates = strategy.generate_measure_candidates()
    while candidates is not None:
        num_trials_each_iter.append(len(candidates))
        runner_results: List[ms.runner.RunnerResult] = []
        for candidate in candidates:
            _is_trace_equal(
                candidate.sch,
                correct_sch,
                remove_decisions=(isinstance(strategy, ms.search_strategy.ReplayTrace)),
            )
            # Feed back a fixed fake measurement for every candidate.
            runner_results.append(
                ms.runner.RunnerResult(
                    run_secs=[0.11, 0.41, 0.54],
                    error_msg=None,
                ),
            )
        strategy.notify_runner_results(candidates, runner_results)
        candidates = strategy.generate_measure_candidates()
    strategy.post_tuning()
    assert num_trials_each_iter == [1, 0, 0, 0, 0]
def test_meta_schedule_evolutionary_search_fail_init_population():
    """Evolutionary search must yield no candidates at all when every sketch
    is rejected by the postprocessors, so no initial population can form."""

    @derived_object
    class AlwaysFailPostproc(ms.postproc.PyPostproc):
        """A postproc that always fails."""

        def _initialize_with_tune_context(self, context: ms.TuneContext) -> None:
            pass

        def apply(self, sch: Schedule) -> bool:
            return False

        def clone(self) -> "AlwaysFailPostproc":
            return AlwaysFailPostproc()

        def __str__(self) -> str:
            return "AlwaysFailPostproc"

    tune_ctx = ms.TuneContext(
        mod=Matmul,
        space_generator=ms.space_generator.ScheduleFn(
            sch_fn=_schedule_matmul,
            sch_rules=[],
            postprocs=[AlwaysFailPostproc()],
            mutator_probs={DummyMutator(): 1.0},
        ),
        search_strategy=ms.search_strategy.EvolutionarySearch(
            population_size=5,
            init_measured_ratio=0.1,
            init_min_unmeasured=50,
            genetic_num_iters=3,
            genetic_mutate_prob=0.5,
            genetic_max_fail_count=10,
            eps_greedy=0.9,
        ),
        target=tvm.target.Target("llvm"),
        num_threads=1,
    )
    searcher = tune_ctx.search_strategy
    searcher.pre_tuning(
        max_trials=2000,
        num_trials_per_iter=10,
        design_spaces=tune_ctx.space_generator.generate_design_space(tune_ctx.mod),
        database=ms.database.MemoryDatabase(),
        cost_model=ms.cost_model.RandomModel(),
    )
    # Every sketch is post-processed away, so nothing can be measured.
    assert searcher.generate_measure_candidates() is None
# Allow running these tests directly as a script (outside pytest).
if __name__ == "__main__":
    test_meta_schedule_replay_func(ms.search_strategy.ReplayFunc)
    test_meta_schedule_replay_func(ms.search_strategy.ReplayTrace)
    test_meta_schedule_evolutionary_search()
    test_meta_schedule_evolutionary_search_early_stop()
    test_meta_schedule_evolutionary_search_fail_init_population()
"""Tests for MetaSchedule search space on CPU"""
from tvm |
import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation |
import (
check_sketches,
print_sketches,
generate_design_space,
)
from tvm.meta_schedule.testing.te_workload |
import create_te_workload
from tvm.script |
import tir as T
from tvm.target |
import Target
def _target():
    """Return the CPU target shared by every design-space test in this file."""
    cpu_target = Target("aws/cpu/c5.9xlarge")
    return cpu_target
def _design_space(mod):
    """Generate all LLVM design-space sketches for *mod* with the default
    schedule rules."""
    return generate_design_space(
        mod=mod,
        kind="llvm",
        target=_target(),
        types=ms.ScheduleRule,
    )
def test_cpu_c1d():
    """CPU design-space sketches for the C1D (conv1d_nlc) workload: three
    sketches differing in where PadInput is computed.

    NOTE(review): several buffer-access lines were truncated and others split
    mid-token in the extracted text; they are restored here following the
    conv1d_nlc access pattern (PadInput[..., co // 128 * 64 + rc] *
    weight[rl, rc, co]). Verify against the upstream test file.
    """

    @T.prim_func
    def c1d_0(inputs: T.Buffer[(1, 256, 64), "float32"], weight: T.Buffer[(3, 64, 128), "float32"], conv1d_nlc: T.Buffer[(1, 128, 128), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 258, 64], dtype="float32")
            conv1d_nlc_global = T.alloc_buffer([1, 128, 128], dtype="float32")
            for i0, i1, i2 in T.grid(1, 258, 64):
                with T.block("PadInput"):
                    i0_1, i1_1, i2_1 = T.axis.remap("SSS", [i0, i1, i2])
                    T.reads(inputs[i0_1, i1_1 - 1, i2_1])
                    T.writes(PadInput[i0_1, i1_1, i2_1])
                    PadInput[i0_1, i1_1, i2_1] = T.if_then_else(1 <= i1_1 and i1_1 < 257, inputs[i0_1, i1_1 - 1, i2_1], T.float32(0), dtype="float32")
            for i0_0, i1_0, i2_0, i0_1_1, i1_1_1, i2_1_1 in T.grid(1, 1, 2, 1, 1, 8):
                for i3_0, i4_0, i0_2, i1_2, i2_2, i3_1, i4_1, i0_3, i1_3, i2_3 in T.grid(1, 64, 1, 64, 8, 3, 1, 1, 2, 1):
                    with T.block("conv1d_nlc"):
                        n = T.axis.spatial(1, i0_1_1 + i0_2 + i0_3 + i0_0)
                        l = T.axis.spatial(128, i1_0 * 128 + i1_1_1 * 128 + i1_2 * 2 + i1_3)
                        co = T.axis.spatial(128, i2_3 + i2_0 * 64 + i2_1_1 * 8 + i2_2)
                        rl = T.axis.reduce(3, i3_0 * 3 + i3_1)
                        rc = T.axis.reduce(64, i4_1 + i4_0)
                        T.reads(PadInput[n, l * 2 + rl, co // 128 * 64 + rc], weight[rl, rc, co])
                        T.writes(conv1d_nlc_global[n, l, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            conv1d_nlc_global[n, l, co] = T.float32(0)
                        conv1d_nlc_global[n, l, co] = conv1d_nlc_global[n, l, co] + PadInput[n, l * 2 + rl, co // 128 * 64 + rc] * weight[rl, rc, co]
                for ax0, ax1, ax2 in T.grid(1, 128, 8):
                    with T.block("conv1d_nlc_global"):
                        v0, v1 = T.axis.remap("SS", [ax0, ax1])
                        v2 = T.axis.spatial(128, i2_0 * 64 + i2_1_1 * 8 + ax2)
                        T.reads(conv1d_nlc_global[v0, v1, v2])
                        T.writes(conv1d_nlc[v0, v1, v2])
                        conv1d_nlc[v0, v1, v2] = conv1d_nlc_global[v0, v1, v2]

    @T.prim_func
    def c1d_1(inputs: T.Buffer[(1, 256, 64), "float32"], weight: T.Buffer[(3, 64, 128), "float32"], conv1d_nlc: T.Buffer[(1, 128, 128), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 258, 64], dtype="float32")
            conv1d_nlc_global = T.alloc_buffer([1, 128, 128], dtype="float32")
            for i0_0, i1_0, i2_0 in T.grid(1, 1, 2):
                for i0_1, i1_1, i2_1 in T.grid(1, 1, 8):
                    for ax0, ax1, ax2 in T.grid(1, 257, 64):
                        with T.block("PadInput"):
                            i0 = T.axis.spatial(1, ax0)
                            i1 = T.axis.spatial(258, ax1)
                            i2 = T.axis.spatial(64, ax2)
                            T.reads(inputs[i0, i1 - 1, i2])
                            T.writes(PadInput[i0, i1, i2])
                            PadInput[i0, i1, i2] = T.if_then_else(1 <= i1 and i1 < 257, inputs[i0, i1 - 1, i2], T.float32(0), dtype="float32")
                    for i3_0, i4_0, i0_2, i1_2, i2_2, i3_1, i4_1, i0_3, i1_3, i2_3 in T.grid(1, 64, 1, 64, 8, 3, 1, 1, 2, 1):
                        with T.block("conv1d_nlc"):
                            n = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
                            l = T.axis.spatial(128, i1_0 * 128 + i1_1 * 128 + i1_2 * 2 + i1_3)
                            co = T.axis.spatial(128, i2_3 + i2_0 * 64 + i2_1 * 8 + i2_2)
                            rl = T.axis.reduce(3, i3_0 * 3 + i3_1)
                            rc = T.axis.reduce(64, i4_1 + i4_0)
                            T.reads(PadInput[n, l * 2 + rl, co // 128 * 64 + rc], weight[rl, rc, co])
                            T.writes(conv1d_nlc_global[n, l, co])
                            T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                            with T.init():
                                conv1d_nlc_global[n, l, co] = T.float32(0)
                            conv1d_nlc_global[n, l, co] = conv1d_nlc_global[n, l, co] + PadInput[n, l * 2 + rl, co // 128 * 64 + rc] * weight[rl, rc, co]
                for ax0, ax1, ax2 in T.grid(1, 128, 64):
                    with T.block("conv1d_nlc_global"):
                        v0, v1 = T.axis.remap("SS", [ax0, ax1])
                        v2 = T.axis.spatial(128, i2_0 * 64 + ax2)
                        T.reads(conv1d_nlc_global[v0, v1, v2])
                        T.writes(conv1d_nlc[v0, v1, v2])
                        conv1d_nlc[v0, v1, v2] = conv1d_nlc_global[v0, v1, v2]

    @T.prim_func
    def c1d_2(inputs: T.Buffer[(1, 256, 64), "float32"], weight: T.Buffer[(3, 64, 128), "float32"], conv1d_nlc: T.Buffer[(1, 128, 128), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            for i0_0, i1_0, i2_0, i0_1, i1_1, i2_1, i3_0, i4_0, i0_2, i1_2, i2_2, i3_1, i4_1, i0_3, i1_3, i2_3 in T.grid(1, 1, 2, 1, 1, 8, 1, 64, 1, 64, 8, 3, 1, 1, 2, 1):
                with T.block("conv1d_nlc"):
                    n = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
                    l = T.axis.spatial(128, i1_0 * 128 + i1_1 * 128 + i1_2 * 2 + i1_3)
                    co = T.axis.spatial(128, i2_3 + i2_0 * 64 + i2_1 * 8 + i2_2)
                    rl = T.axis.reduce(3, i3_0 * 3 + i3_1)
                    rc = T.axis.reduce(64, i4_1 + i4_0)
                    T.reads(inputs[n, l * 2 + rl - 1, co // 128 * 64 + rc], weight[rl, rc, co])
                    T.writes(conv1d_nlc[n, l, co])
                    T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                    with T.init():
                        conv1d_nlc[n, l, co] = T.float32(0)
                    conv1d_nlc[n, l, co] = conv1d_nlc[n, l, co] + T.if_then_else(1 <= l * 2 + rl and l * 2 + rl < 257, inputs[n, l * 2 + rl - 1, co // 128 * 64 + rc], T.float32(0), dtype="float32") * weight[rl, rc, co]

    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 64, 2]),
        ("SamplePerfectTile", [2, 8, 8, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [64, 1]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", -1),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 64, 2]),
        ("SamplePerfectTile", [2, 8, 8, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [64, 1]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", 5),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 64, 2]),
        ("SamplePerfectTile", [2, 8, 8, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [64, 1]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", -2),
    ]
    mod = create_te_workload("C1D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c1d_0, c1d_1, c1d_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cpu_c2d():
    """CPU design-space sketches for the C2D (conv2d_nhwc) workload: three
    sketches differing in where PadInput is computed.

    NOTE(review): the function name and several lines were split mid-token in
    the extracted text, and some buffer-access lines were truncated; they are
    restored here following the conv2d_nhwc access pattern
    (PadInput[..., co // 64 * 3 + rc] * weight[rh, rw, rc, co]).
    Verify against the upstream test file.
    """

    @T.prim_func
    def c2d_0(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
            conv2d_nhwc_global = T.alloc_buffer([1, 112, 112, 64], dtype="float32")
            for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1 in T.grid(1, 7, 4, 2, 1, 1, 28):
                for ax0, ax1, ax2, ax3 in T.grid(1, 37, 7, 3):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(230, i1_0 * 32 + ax1)
                        i2 = T.axis.spatial(230, i2_0 * 56 + i2_1 * 2 + ax2)
                        i3 = T.axis.spatial(3, ax3)
                        T.reads(inputs[i0, i1 - 3, i2 - 3, i3])
                        T.writes(PadInput[i0, i1, i2, i3])
                        PadInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, inputs[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
                for i3_1 in T.serial(8):
                    for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(7, 7, 1, 1, 2, 1, 1, 1, 1, 3, 1, 8, 1, 4):
                        with T.block("conv2d_nhwc"):
                            n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                            h = T.axis.spatial(112, i1_0 * 16 + i1_1 * 16 + i1_2 * 8 + i1_3)
                            w = T.axis.spatial(112, i2_3 + i2_0 * 28 + i2_1 + i2_2)
                            co = T.axis.spatial(64, i3_0 * 32 + i3_1 * 4 + i3_2 * 4 + i3_3)
                            rh = T.axis.reduce(7, i4_1 + i4_0)
                            rw = T.axis.reduce(7, i5_0 + i5_1)
                            rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
                            T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc], weight[rh, rw, rc, co])
                            T.writes(conv2d_nhwc_global[n, h, w, co])
                            T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                            with T.init():
                                conv2d_nhwc_global[n, h, w, co] = T.float32(0)
                            conv2d_nhwc_global[n, h, w, co] = conv2d_nhwc_global[n, h, w, co] + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
                    for ax0, ax1, ax2, ax3 in T.grid(1, 16, 1, 4):
                        with T.block("conv2d_nhwc_global"):
                            v0 = T.axis.spatial(1, ax0)
                            v1 = T.axis.spatial(112, i1_0 * 16 + ax1)
                            v2 = T.axis.spatial(112, i2_0 * 28 + i2_1 + ax2)
                            v3 = T.axis.spatial(64, i3_0 * 32 + i3_1 * 4 + ax3)
                            T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                            T.writes(conv2d_nhwc[v0, v1, v2, v3])
                            conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]

    @T.prim_func
    def c2d_1(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
            conv2d_nhwc_global = T.alloc_buffer([1, 112, 112, 64], dtype="float32")
            for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
                with T.block("PadInput"):
                    i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
                    T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
                    PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(3 <= i1_1 and i1_1 < 227 and 3 <= i2_1 and i2_1 < 227, inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1], T.float32(0), dtype="float32")
            for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 7, 4, 2):
                for i0_1_1, i1_1_1, i2_1_1, i3_1_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 28, 8, 7, 7, 1, 1, 2, 1, 1, 1, 1, 3, 1, 8, 1, 4):
                    with T.block("conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_3 + i0_0 + i0_1_1 + i0_2)
                        h = T.axis.spatial(112, i1_0 * 16 + i1_1_1 * 16 + i1_2 * 8 + i1_3)
                        w = T.axis.spatial(112, i2_3 + i2_0 * 28 + i2_1_1 + i2_2)
                        co = T.axis.spatial(64, i3_0 * 32 + i3_1_1 * 4 + i3_2 * 4 + i3_3)
                        rh = T.axis.reduce(7, i4_1 + i4_0)
                        rw = T.axis.reduce(7, i5_0 + i5_1)
                        rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
                        T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc], weight[rh, rw, rc, co])
                        T.writes(conv2d_nhwc_global[n, h, w, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            conv2d_nhwc_global[n, h, w, co] = T.float32(0)
                        conv2d_nhwc_global[n, h, w, co] = conv2d_nhwc_global[n, h, w, co] + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
                for ax0, ax1, ax2, ax3 in T.grid(1, 16, 28, 32):
                    with T.block("conv2d_nhwc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(112, i1_0 * 16 + ax1)
                        v2 = T.axis.spatial(112, i2_0 * 28 + ax2)
                        v3 = T.axis.spatial(64, i3_0 * 32 + ax3)
                        T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                        T.writes(conv2d_nhwc[v0, v1, v2, v3])
                        conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]

    @T.prim_func
    def c2d_2(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
            for i0_0, i1_0 in T.grid(1, 7):
                for ax0, ax1, ax2, ax3 in T.grid(1, 37, 229, 3):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(230, i1_0 * 32 + ax1)
                        i2 = T.axis.spatial(230, ax2)
                        i3 = T.axis.spatial(3, ax3)
                        T.reads(inputs[i0, i1 - 3, i2 - 3, i3])
                        T.writes(PadInput[i0, i1, i2, i3])
                        PadInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, inputs[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
                for i2_0, i3_0, i0_1, i1_1, i2_1, i3_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(4, 2, 1, 1, 28, 8, 7, 7, 1, 1, 2, 1, 1, 1, 1, 3, 1, 8, 1, 4):
                    with T.block("conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                        h = T.axis.spatial(112, i1_0 * 16 + i1_1 * 16 + i1_2 * 8 + i1_3)
                        w = T.axis.spatial(112, i2_3 + i2_0 * 28 + i2_1 + i2_2)
                        co = T.axis.spatial(64, i3_0 * 32 + i3_1 * 4 + i3_2 * 4 + i3_3)
                        rh = T.axis.reduce(7, i4_1 + i4_0)
                        rw = T.axis.reduce(7, i5_0 + i5_1)
                        rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
                        T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc], weight[rh, rw, rc, co])
                        T.writes(conv2d_nhwc[n, h, w, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            conv2d_nhwc[n, h, w, co] = T.float32(0)
                        conv2d_nhwc[n, h, w, co] = conv2d_nhwc[n, h, w, co] + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]

    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [7, 1, 2, 8]),
        ("SamplePerfectTile", [4, 28, 1, 1]),
        ("SamplePerfectTile", [2, 8, 1, 4]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", 6),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [7, 1, 2, 8]),
        ("SamplePerfectTile", [4, 28, 1, 1]),
        ("SamplePerfectTile", [2, 8, 1, 4]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", -1),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [7, 1, 2, 8]),
        ("SamplePerfectTile", [4, 28, 1, 1]),
        ("SamplePerfectTile", [2, 8, 1, 4]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 0),
        ("SampleComputeLocation", 1),
    ]
    mod = create_te_workload("C2D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c2d_0, c2d_1, c2d_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cpu_c3d():
@T.prim_func |
def c3d_0(inputs: T.Buffer[(1, 16, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 7, 3, 64), "float32"], conv3d_ndhwc: T.Buffer[(1, 8, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 22, 230, 230, 3], dtype="float32")
conv3d_ndhwc_global = T.alloc_buffer([1, 8, 112, 112, 64], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i4_0 in T.grid(1, 2, 4, 1, 2):
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 13, 61, 229, 3):
with T.block("PadInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(22, i1_0 * 8 + ax1)
i2 = T.axis.spatial(230, i2_0 * 56 + ax2)
i3 = T.axis.spatial(230, ax3)
i4 = T.axis.spatial(3, ax4)
T.reads(inputs[i0, i1 - 3, i2 - 3, i3 - 3, i4])
T.writes(PadInput[i0, i1, i2, i3, i4])
PadInput[i0, i1, i2, i3, i4] = T.if_then_else(3 <= i1 and i1 < 19 and 3 <= i2 and i2 < 227 and 3 <= i3 and i3 < 227, inputs[i0, i1 - 3, i2 - 3, i3 - 3, i4], T.float32(0), dtype="float32")
for i0_1, i1_1, i2_1, i3_1, i4_1 in T.grid(1, 4, 4, 14, 1):
for i5_0, i6_0, i7_0, i8_0, i0_2, i1_2, i2_2, i3_2, i4_2, i5_1, i6_1, i7_1, i8_1, i0_3, i1_3, i2_3, i3_3, i4_3 in T.grid(1, 7, 7, 3, 1, 1, 1, 1, 32, 7, 1, 1, 1, 1, 1, 7, 8, 1):
with T.block("conv3d_ndhwc"):
n = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
d = T.axis.spatial(8, i1_3 + i1_0 * 4 + i1_1 + i1_2)
h = T.axis.spatial(112, i2_0 * 28 + i2_1 * 7 + i2_2 * 7 + i2_3) |
w = T.axis.spatial(112, i3_0 * 112 + i3_1 * 8 + i3_2 * 8 + i3_3)
co = T.axis.spatial(64, i4_3 + i4_0 * 32 + i4_1 * 32 + i4_2)
rd = T.axis.reduce(7, i5_0 * 7 + i5_1)
rh = T.axis.reduce(7, i6_1 + i6_0)
rw = T.axis.reduce(7, i7_0 + i7_1)
rc = T.axis.reduce(3, i8_1 + i8_0)
T.reads(PadInput[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co
T.writes(conv3d_ndhwc_global[n, d, h, w, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
conv3d_ndhwc_global[n, d, h, w, co] = T.float32(0)
conv3d_ndhwc_global[n, d, h, w, co] = conv3d_ndhwc_global[n, d, h, w, co] + PadInput[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 1, 7, 8, 32):
with T.block("conv3d_ndhwc_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(8, i1_0 * 4 + i1_1 + ax1)
v2 = T.axis.spatial(112, i2_0 * 28 + i2_1 * 7 + ax2)
v3 = T.axis.spatial(112, i3_1 * 8 + ax3)
v4 = T.axis.spatial(64, i4_0 * 32 + ax4)
T.reads(conv3d_ndhwc_global[v0, v1, v2, v3, v4])
T.writes(conv3d_ndhwc[v0, v1, v2, v3, v4])
conv3d_ndhwc[v0, v1, v2, v3, v4] = conv3d_ndhwc_global[v0, v1, v2, v3, v4]
@T.prim_func
def c3d_1(inputs: T.Buffer[(1, 16, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 7, 3, 64), "float32"], conv3d_ndhwc: T.Buffer[(1, 8, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes() |
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 22, 230, 230, 3], dtype="float32")
conv3d_ndhwc_global = T.alloc_buffer([1, 8, 112, 112, 64], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i4_0 in T.grid(1, 2, 4, 1, 2):
for i0_1, i1_1, i2_1, i3_1 in T.grid(1, 4, 4, 14):
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 7, 19, 21, 3):
with T.block("PadInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(22, i1_0 * 8 + i1_1 * 2 + ax1)
i2 = T.axis.spatial(230, i2_0 * 56 + i2_1 * 14 + ax2)
i3 = T.axis.spatial(230, i3_1 * 16 + ax3)
i4 = T.axis.spatial(3, ax4)
T.reads(inputs[i0, i1 - 3, i2 - 3, i3 - 3, i4])
T.writes(PadInput[i0, i1, i2, i3, i4])
PadInput[i0, i1, i2, i3, i4] = T.if_then_else(3 <= i1 and i1 < 19 and 3 <= i2 and i2 < 227 and 3 <= i3 and i3 < 227, inputs[i0, i1 - 3, i2 - 3, i3 - 3, i4], T.float32(0), dtype="float32")
for i4_1, i5_0, i6_0, i7_0, i8_0, i0_2, i1_2, i2_2, i3_2, i4_2, i5_1, i6_1, i7_1, i8_1, i0_3, i1_3, i2_3, i3_3, i4_3 in T.grid(1, 1, 7, 7, 3, 1, 1, 1, 1, 32, 7, 1, 1, 1, 1, 1, 7, 8, 1):
with T.block("conv3d_ndhwc"):
n = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
d = T.axis.spatial(8, i1_3 + i1_0 * 4 + i1_1 + i1_2)
h = T.axis.spatial(112, i2_0 * 28 + i2_1 * 7 + i2_2 * 7 + i2_3)
w = T.axis.spatial(112, i3_0 * 112 + i3_1 * 8 + i3_2 * 8 + i3_3)
co = T.axis.spatial(64, i4_3 + i4_0 * 32 + i4_1 * 32 + i4_2)
rd = T.axis.reduce(7, i5_0 * 7 + i5_1)
rh = T.axis |
.reduce(7, i6_1 + i6_0)
rw = T.axis.reduce(7, i7_0 + i7_1)
rc = T.axis.reduce(3, i8_1 + i8_0)
T.reads(PadInput[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co
T.writes(conv3d_ndhwc_global[n, d, h, w, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
conv3d_ndhwc_global[n, d, h, w, co] = T.float32(0)
conv3d_ndhwc_global[n, d, h, w, co] = conv3d_ndhwc_global[n, d, h, w, co] + PadInput[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 4, 28, 112, 32):
with T.block("conv3d_ndhwc_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(8, i1_0 * 4 + ax1)
v2 = T.axis.spatial(112, i2_0 * 28 + ax2)
v3 = T.axis.spatial(112, ax3)
v4 = T.axis.spatial(64, i4_0 * 32 + ax4)
T.reads(conv3d_ndhwc_global[v0, v1, v2, v3, v4])
T.writes(conv3d_ndhwc[v0, v1, v2, v3, v4])
conv3d_ndhwc[v0, v1, v2, v3, v4] = conv3d_ndhwc_global[v0, v1, v2, v3, v4]
@T.prim_func
def c3d_2(inputs: T.Buffer[(1, 16, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 7, 3, 64), "float32"], conv3d_ndhwc: T.Buffer[(1, 8, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 22, 230, 230, 3], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i4_0, i0_1, i1_1, i2_1, i3_1 in T.grid(1, 2, 4, 1, 2, 1, 4, 4, 14):
for ax0, ax1, ax2, |
ax3, ax4 in T.grid(1, 7, 19, 21, 3):
with T.block("PadInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(22, i1_0 * 8 + i1_1 * 2 + ax1)
i2 = T.axis.spatial(230, i2_0 * 56 + i2_1 * 14 + ax2)
i3 = T.axis.spatial(230, i3_1 * 16 + ax3)
i4 = T.axis.spatial(3, ax4)
T.reads(inputs[i0, i1 - 3, i2 - 3, i3 - 3, i4])
T.writes(PadInput[i0, i1, i2, i3, i4])
PadInput[i0, i1, i2, i3, i4] = T.if_then_else(3 <= i1 and i1 < 19 and 3 <= i2 and i2 < 227 and 3 <= i3 and i3 < 227, inputs[i0, i1 - 3, i2 - 3, i3 - 3, i4], T.float32(0), dtype="float32")
for i4_1, i5_0, i6_0, i7_0, i8_0, i0_2, i1_2, i2_2, i3_2, i4_2, i5_1, i6_1, i7_1, i8_1, i0_3, i1_3, i2_3, i3_3, i4_3 in T.grid(1, 1, 7, 7, 3, 1, 1, 1, 1, 32, 7, 1, 1, 1, 1, 1, 7, 8, 1):
with T.block("conv3d_ndhwc"):
n = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
d = T.axis.spatial(8, i1_3 + i1_0 * 4 + i1_1 + i1_2)
h = T.axis.spatial(112, i2_0 * 28 + i2_1 * 7 + i2_2 * 7 + i2_3)
w = T.axis.spatial(112, i3_0 * 112 + i3_1 * 8 + i3_2 * 8 + i3_3)
co = T.axis.spatial(64, i4_3 + i4_0 * 32 + i4_1 * 32 + i4_2)
rd = T.axis.reduce(7, i5_0 * 7 + i5_1)
rh = T.axis.reduce(7, i6_1 + i6_0)
rw = T.axis.reduce(7, i7_0 + i7_1)
rc = T.axis.reduce(3, i8_1 + i8_0)
T.reads(PadInput[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co
T.writes(conv3d_ndhwc[n, d, h, w, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
conv3d_ndhwc[n, d, h, w, co] = T.float32(0)
conv3d_ndhwc[n, d, h, w, c |
o] = conv3d_ndhwc[n, d, h, w, co] + PadInput[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 4, 1, 1]),
("SamplePerfectTile", [4, 4, 1, 7]),
("SamplePerfectTile", [1, 14, 1, 8]),
("SamplePerfectTile", [2, 1, 32, 1]),
("SamplePerfectTile", [1, 7]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [3, 1]),
("SampleCategorical", 3),
("SampleComputeLocation", 4),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 4, 1, 1]),
("SamplePerfectTile", [4, 4, 1, 7]),
("SamplePerfectTile", [1, 14, 1, 8]),
("SamplePerfectTile", [2, 1, 32, 1]),
("SamplePerfectTile", [1, 7]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [3, 1]),
("SampleCategorical", 2),
("SampleComputeLocation", 8),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 4, 1, 1]),
("SamplePerfectTile", [4, 4, 1, 7]),
("SamplePerfectTile", [1, 14, 1, 8]),
("SamplePerfectTile", [2, 1, 32, 1]),
("SamplePerfectTile", [1, 7]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [3, 1]),
("SampleCategorical", 1),
("SampleComputeLocation", 8),
]
mod = create_te_workload("C3D", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[c3d_0, c3d_1, c3d_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_cap():
    # Design-space test for the capsule-conv (CAP) workload on CPU.
    @T.prim_func
    def cap_0(inputs: T.Buffer[(1, 16, 16, 4, 4, 32), "float32"], weight: T.Buffer[(3, 3, 4, 4, 32, 32), "float32"], conv2d_capsule_nhwijc: T.Buffer[(1, 8, 8, 4, 4, 32), "float32"]) -> None:
        # Expected sketch #0: PadInput computed at loop i1_1; the result is
        # accumulated in a cache_write ("global") buffer then copied out.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 18, 18, 4, 4, 32], dtype="float32")
            conv2d_capsule_nhwijc_global = T.alloc_buffer([1, 8, 8, 4, 4, 32], dtype="float32")
            for i0_0, i1_0, i2_0, i3_0, i4_0, i5_0, i0_1, i1_1 in T.grid(1, 2, 1, 1, 1, 1, 1, 4):
                for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 3, 17, 4, 4, 32):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(18, i1_0 * 8 + i1_1 * 2 + ax1)
                        i2 = T.axis.spatial(18, ax2)
                        i3, i4, i5 = T.axis.remap("SSS", [ax3, ax4, ax5])
                        T.reads(inputs[i0, i1 - 1, i2 - 1, i3, i4, i5])
                        T.writes(PadInput[i0, i1, i2, i3, i4, i5])
                        PadInput[i0, i1, i2, i3, i4, i5] = T.if_then_else(1 <= i1 and i1 < 17 and 1 <= i2 and i2 < 17, inputs[i0, i1 - 1, i2 - 1, i3, i4, i5], T.float32(0), dtype="float32")
                for i2_1, i3_1, i4_1, i5_1 in T.grid(4, 1, 4, 2):
                    for i6_0, i7_0, i8_0, i9_0, i0_2, i1_2, i2_2, i3_2, i4_2, i5_2, i6_1, i7_1, i8_1, i9_1, i0_3, i1_3, i2_3, i3_3, i4_3, i5_3 in T.grid(1, 3, 4, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 32, 1, 1, 1, 4, 1, 16):
                        with T.block("conv2d_capsule_nhwijc"):
                            n = T.axis.spatial(1, i0_2 + i0_3 + i0_0 + i0_1)
                            h = T.axis.spatial(8, i1_0 * 4 + i1_1 + i1_2 + i1_3)
                            w = T.axis.spatial(8, i2_0 * 8 + i2_1 * 2 + i2_2 + i2_3)
                            cap_i = T.axis.spatial(4, i3_0 * 4 + i3_1 * 4 + i3_2 * 4 + i3_3)
                            cap_j = T.axis.spatial(4, i4_0 * 4 + i4_1 + i4_2 + i4_3)
                            co = T.axis.spatial(32, i5_0 * 32 + i5_1 * 16 + i5_2 * 16 + i5_3)
                            rh = T.axis.reduce(3, i6_0 * 3 + i6_1)
                            rw = T.axis.reduce(3, i7_1 + i7_0)
                            cap_k = T.axis.reduce(4, i8_0 + i8_1)
                            rc = T.axis.reduce(32, i9_0 * 32 + i9_1)
                            T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc], weight[rh, rw, cap_k, cap_j, rc, co])
                            T.writes(conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co])
                            T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                            with T.init():
                                conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co] = T.float32(0)
                            conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co] = conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co] + PadInput[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc] * weight[rh, rw, cap_k, cap_j, rc, co]
                    for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 1, 2, 4, 1, 16):
                        with T.block("conv2d_capsule_nhwijc_global"):
                            v0 = T.axis.spatial(1, ax0)
                            v1 = T.axis.spatial(8, i1_0 * 4 + i1_1 + ax1)
                            v2 = T.axis.spatial(8, i2_1 * 2 + ax2)
                            v3 = T.axis.spatial(4, ax3)
                            v4 = T.axis.spatial(4, i4_1 + ax4)
                            v5 = T.axis.spatial(32, i5_1 * 16 + ax5)
                            T.reads(conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5])
                            T.writes(conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5])
                            conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5] = conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5]
@T.prim_func
def cap_1(inputs: T.Buffer[(1, 16, 16, 4, 4, 32), "float32"], weight: T.Buffer[(3, 3, 4, 4, 32, 32), "float32"], conv2d_capsule_nhwijc: T.Buffer[(1, 8, 8, 4, 4, 32), "float32"]) -> None: |
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 18, 18, 4, 4, 32], dtype="float32")
conv2d_capsule_nhwijc_global = T.alloc_buffer([1, 8, 8, 4, 4, 32], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i4_0, i5_0 in T.grid(1, 2, 1, 1, 1, 1):
for i0_1, i1_1, i2_1, i3_1, i4_1, i5_1 in T.grid(1, 4, 4, 1, 4, 2):
for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 3, 5, 4, 4, 32):
with T.block("PadInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(18, i1_0 * 8 + i1_1 * 2 + ax1)
i2 = T.axis.spatial(18, i2_1 * 4 + ax2)
i3, i4, i5 = T.axis.remap("SSS", [ax3, ax4, ax5])
T.reads(inputs[i0, i1 - 1, i2 - 1, i3, i4, i5])
T.writes(PadInput[i0, i1, i2, i3, i4, i5])
PadInput[i0, i1, i2, i3, i4, i5] = T.if_then_else(1 <= i1 and i1 < 17 and 1 <= i2 and i2 < 17, inputs[i0, i1 - 1, i2 - 1, i3, i4, i5], T.float32(0), dtype="float32")
for i6_0, i7_0, i8_0, i9_0, i0_2, i1_2, i2_2, i3_2, i4_2, i5_2, i6_1, i7_1, i8_1, i9_1, i0_3, i1_3, i2_3, i3_3, i4_3, i5_3 in T.grid(1, 3, 4, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 32, 1, 1, 1, 4, 1, 16):
with T.block("conv2d_capsule_nhwijc"):
n = T.axis.spatial(1, i0_2 + i0_3 + i0_0 + i0_1)
h = T.axis.spatial(8, i1_0 * 4 + i1_1 + i1_2 + i1_3)
w = T.axis.spatial(8, i2_0 * 8 + i2_1 * 2 + i2_2 + i2_3)
cap_i = T.axis.spatial(4, i3_0 * 4 + i3_1 * 4 + i3_2 * 4 + i3_3)
cap_j = T.axis.spatial(4, i4 |
_0 * 4 + i4_1 + i4_2 + i4_3)
co = T.axis.spatial(32, i5_0 * 32 + i5_1 * 16 + i5_2 * 16 + i5_3)
rh = T.axis.reduce(3, i6_0 * 3 + i6_1)
rw = T.axis.reduce(3, i7_1 + i7_0)
cap_k = T.axis.reduce(4, i8_0 + i8_1)
rc = T.axis.reduce(32, i9_0 * 32 + i9_1)
T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc], weight[rh, rw, cap_k, cap_j, rc, co])
T.writes(conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co] = T.float32(0)
conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co] = conv2d_capsule_nhwijc_global[n, h, w, cap_i, cap_j, co] + PadInput[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc] * weight[rh, rw, cap_k, cap_j, rc, co]
for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 4, 8, 4, 4, 32):
with T.block("conv2d_capsule_nhwijc_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(8, i1_0 * 4 + ax1)
v2, v3, v4, v5 = T.axis.remap("SSSS", [ax2, ax3, ax4, ax5])
T.reads(conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5])
T.writes(conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5])
conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5] = conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5]
    @T.prim_func
    def cap_2(inputs: T.Buffer[(1, 16, 16, 4, 4, 32), "float32"], weight: T.Buffer[(3, 3, 4, 4, 32, 32), "float32"], conv2d_capsule_nhwijc: T.Buffer[(1, 8, 8, 4, 4, 32), "float32"]) -> None:
        # Expected sketch #2: PadInput stays at the root (compute location -1)
        # and the convolution writes the output buffer directly (no cache_write).
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 18, 18, 4, 4, 32], dtype="float32")
            for i0, i1, i2, i3, i4, i5 in T.grid(1, 18, 18, 4, 4, 32):
                with T.block("PadInput"):
                    i0_1, i1_1, i2_1, i3_1, i4_1, i5_1 = T.axis.remap("SSSSSS", [i0, i1, i2, i3, i4, i5])
                    T.reads(inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1, i4_1, i5_1])
                    T.writes(PadInput[i0_1, i1_1, i2_1, i3_1, i4_1, i5_1])
                    PadInput[i0_1, i1_1, i2_1, i3_1, i4_1, i5_1] = T.if_then_else(1 <= i1_1 and i1_1 < 17 and 1 <= i2_1 and i2_1 < 17, inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1, i4_1, i5_1], T.float32(0), dtype="float32")
            # Fully fused SSRSRS tiling of the capsule convolution.
            for i0_0, i1_0, i2_0, i3_0, i4_0, i5_0, i0_1_1, i1_1_1, i2_1_1, i3_1_1, i4_1_1, i5_1_1, i6_0, i7_0, i8_0, i9_0, i0_2, i1_2, i2_2, i3_2, i4_2, i5_2, i6_1, i7_1, i8_1, i9_1, i0_3, i1_3, i2_3, i3_3, i4_3, i5_3 in T.grid(1, 2, 1, 1, 1, 1, 1, 4, 4, 1, 4, 2, 1, 3, 4, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 32, 1, 1, 1, 4, 1, 16):
                with T.block("conv2d_capsule_nhwijc"):
                    n = T.axis.spatial(1, i0_2 + i0_3 + i0_0 + i0_1_1)
                    h = T.axis.spatial(8, i1_0 * 4 + i1_1_1 + i1_2 + i1_3)
                    w = T.axis.spatial(8, i2_0 * 8 + i2_1_1 * 2 + i2_2 + i2_3)
                    cap_i = T.axis.spatial(4, i3_0 * 4 + i3_1_1 * 4 + i3_2 * 4 + i3_3)
                    cap_j = T.axis.spatial(4, i4_0 * 4 + i4_1_1 + i4_2 + i4_3)
                    co = T.axis.spatial(32, i5_0 * 32 + i5_1_1 * 16 + i5_2 * 16 + i5_3)
                    rh = T.axis.reduce(3, i6_0 * 3 + i6_1)
                    rw = T.axis.reduce(3, i7_1 + i7_0)
                    cap_k = T.axis.reduce(4, i8_0 + i8_1)
                    rc = T.axis.reduce(32, i9_0 * 32 + i9_1)
                    T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc], weight[rh, rw, cap_k, cap_j, rc, co])
                    T.writes(conv2d_capsule_nhwijc[n, h, w, cap_i, cap_j, co])
                    T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                    with T.init():
                        conv2d_capsule_nhwijc[n, h, w, cap_i, cap_j, co] = T.float32(0)
                    conv2d_capsule_nhwijc[n, h, w, cap_i, cap_j, co] = conv2d_capsule_nhwijc[n, h, w, cap_i, cap_j, co] + PadInput[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc] * weight[rh, rw, cap_k, cap_j, rc, co]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 4, 1, 1]),
("SamplePerfectTile", [1, 4, 2, 1]),
("SamplePerfectTile", [1, 1, 1, 4]),
("SamplePerfectTile", [1, 4, 1, 1]),
("SamplePerfectTile", [1, 2, 1, 16]),
("SamplePerfectTile", [1, 3]),
("SamplePerfectTile", [3, 1]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 32]),
("SampleCategorical", 0),
("SampleComputeLocation", 7),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 4, 1, 1]),
("SamplePerfectTile", [1, 4, 2, 1]),
("SamplePerfectTile", [1, 1, 1, 4]),
("SamplePerfectTile", [1, 4, 1, 1]),
("SamplePerfectTile", [1, 2, 1, 16]),
("SamplePerfectTile", [1, 3]),
("SamplePerfectTile", [3, 1]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 32]),
("SampleCategorical", 0),
("SampleComputeLocation", 11),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 4, 1, 1]),
("SamplePerfectTile", [1, 4, 2, 1]),
("SamplePerfectTile", [1, 1, 1, 4]),
("SamplePerfectTile", [1, 4, 1, 1]),
("SamplePerfectTile", [1, 2, 1, 16]),
("SamplePerfectTile", [1, 3]),
("SamplePerfectTile", [3, 1]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 32]),
("SampleCategorical", 1),
("SampleCom |
puteLocation", -1),
]
mod = create_te_workload("CAP", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[cap_0, cap_1, cap_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_dep():
    # Design-space test for the depthwise-conv2d (DEP) workload on CPU.
    @T.prim_func
    def dep_0(placeholder: T.Buffer[(1, 112, 112, 32), "float32"], placeholder_1: T.Buffer[(1, 3, 3, 32), "float32"], depth_conv2d_nhwc: T.Buffer[(1, 112, 112, 32), "float32"]) -> None:
        # Expected sketch #0: PadInput at the root; results accumulated in a
        # cache_write ("global") buffer and copied back per spatial tile.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 114, 114, 32], dtype="float32")
            depth_conv2d_nhwc_global = T.alloc_buffer([1, 112, 112, 32], dtype="float32")
            for i0, i1, i2, i3 in T.grid(1, 114, 114, 32):
                with T.block("PadInput"):
                    i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(placeholder[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
                    T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
                    PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 113 and 1 <= i2_1 and i2_1 < 113, placeholder[i0_1, i1_1 - 1, i2_1 - 1, i3_1], T.float32(0), dtype="float32")
            for i0_0, i1_0, i2_0, i3_0, i0_1_1, i1_1_1, i2_1_1, i3_1_1 in T.grid(1, 1, 1, 1, 1, 4, 4, 8):
                for i4_0, i5_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 1, 2, 7, 2, 3, 3, 1, 14, 4, 2):
                    with T.block("depth_conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_2 + i0_3 + i0_0 + i0_1_1)
                        h = T.axis.spatial(112, i1_0 * 112 + i1_1_1 * 28 + i1_2 * 14 + i1_3)
                        w = T.axis.spatial(112, i2_0 * 112 + i2_1_1 * 28 + i2_2 * 4 + i2_3)
                        c = T.axis.spatial(32, i3_0 * 32 + i3_1_1 * 4 + i3_2 * 2 + i3_3)
                        rh = T.axis.reduce(3, i4_0 * 3 + i4_1)
                        rw = T.axis.reduce(3, i5_0 * 3 + i5_1)
                        T.reads(PadInput[n, h + rh, w + rw, c], placeholder_1[0, rh, rw, c])
                        T.writes(depth_conv2d_nhwc_global[n, h, w, c])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            depth_conv2d_nhwc_global[n, h, w, c] = T.float32(0)
                        depth_conv2d_nhwc_global[n, h, w, c] = depth_conv2d_nhwc_global[n, h, w, c] + PadInput[n, h + rh, w + rw, c] * placeholder_1[0, rh, rw, c]
                for ax0, ax1, ax2, ax3 in T.grid(1, 28, 28, 4):
                    with T.block("depth_conv2d_nhwc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(112, i1_1_1 * 28 + ax1)
                        v2 = T.axis.spatial(112, i2_1_1 * 28 + ax2)
                        v3 = T.axis.spatial(32, i3_1_1 * 4 + ax3)
                        T.reads(depth_conv2d_nhwc_global[v0, v1, v2, v3])
                        T.writes(depth_conv2d_nhwc[v0, v1, v2, v3])
                        depth_conv2d_nhwc[v0, v1, v2, v3] = depth_conv2d_nhwc_global[v0, v1, v2, v3]
@T.prim_func
def dep_1(placeholder: T.Buffer[(1, 112, 112, 32), "float32"], placeholder_1: T.Buffer[(1, 3, 3, 32), "float32"], depth_conv2d_nhwc: T.Buffer[(1, 112, 112, 32), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 114, 114, 32], dtype="float32")
depth_conv2d_nhwc_global = T.alloc_buffer([1, 112, 112, 32], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 114, |
114, 32):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(placeholder[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 113 and 1 <= i2_1 and i2_1 < 113, placeholder[i0_1, i1_1 - 1, i2_1 - 1, i3_1], T.float32(0), dtype="float32")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 1, 1, 1):
for i0_1_1, i1_1_1, i2_1_1, i3_1_1, i4_0, i5_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 4, 4, 8, 1, 1, 1, 2, 7, 2, 3, 3, 1, 14, 4, 2):
with T.block("depth_conv2d_nhwc"):
n = T.axis.spatial(1, i0_2 + i0_3 + i0_0 + i0_1_1)
h = T.axis.spatial(112, i1_0 * 112 + i1_1_1 * 28 + i1_2 * 14 + i1_3)
w = T.axis.spatial(112, i2_0 * 112 + i2_1_1 * 28 + i2_2 * 4 + i2_3)
c = T.axis.spatial(32, i3_0 * 32 + i3_1_1 * 4 + i3_2 * 2 + i3_3)
rh = T.axis.reduce(3, i4_0 * 3 + i4_1)
rw = T.axis.reduce(3, i5_0 * 3 + i5_1)
T.reads(PadInput[n, h + rh, w + rw, c], placeholder_1[0, rh, rw, c])
T.writes(depth_conv2d_nhwc_global[n, h, w, c])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
depth_conv2d_nhwc_global[n, h, w, c] = T.float32(0)
depth_conv2d_nhwc_global[n, h, w, c] = depth_conv2d_nhwc_global[n, h, w, c] + PadInput[n, h + rh, w + rw, c] * placeholder_1[0, rh, rw, c]
for ax0, ax1, ax2, ax3 in T.grid(1, 112, 112, 32):
with T.block("depth_conv2d_nhwc_global"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(depth_conv2d_nhwc_global[v |
0, v1, v2, v3])
T.writes(depth_conv2d_nhwc[v0, v1, v2, v3])
depth_conv2d_nhwc[v0, v1, v2, v3] = depth_conv2d_nhwc_global[v0, v1, v2, v3]
    @T.prim_func
    def dep_2(placeholder: T.Buffer[(1, 112, 112, 32), "float32"], placeholder_1: T.Buffer[(1, 3, 3, 32), "float32"], depth_conv2d_nhwc: T.Buffer[(1, 112, 112, 32), "float32"]) -> None:
        # Expected sketch #2: PadInput computed at loop i1_1; the depthwise
        # conv writes the output buffer directly (no cache_write stage).
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 114, 114, 32], dtype="float32")
            for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1 in T.grid(1, 1, 1, 1, 1, 4):
                for ax0, ax1, ax2, ax3 in T.grid(1, 30, 114, 32):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(114, i1_1 * 28 + ax1)
                        i2, i3 = T.axis.remap("SS", [ax2, ax3])
                        T.reads(placeholder[i0, i1 - 1, i2 - 1, i3])
                        T.writes(PadInput[i0, i1, i2, i3])
                        PadInput[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 113 and 1 <= i2 and i2 < 113, placeholder[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
                for i2_1, i3_1, i4_0, i5_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i0_3, i1_3, i2_3, i3_3 in T.grid(4, 8, 1, 1, 1, 2, 7, 2, 3, 3, 1, 14, 4, 2):
                    with T.block("depth_conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_2 + i0_3 + i0_0 + i0_1)
                        h = T.axis.spatial(112, i1_0 * 112 + i1_1 * 28 + i1_2 * 14 + i1_3)
                        w = T.axis.spatial(112, i2_0 * 112 + i2_1 * 28 + i2_2 * 4 + i2_3)
                        c = T.axis.spatial(32, i3_0 * 32 + i3_1 * 4 + i3_2 * 2 + i3_3)
                        rh = T.axis.reduce(3, i4_0 * 3 + i4_1)
                        rw = T.axis.reduce(3, i5_0 * 3 + i5_1)
                        T.reads(PadInput[n, h + rh, w + rw, c], placeholder_1[0, rh, rw, c])
                        T.writes(depth_conv2d_nhwc[n, h, w, c])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            depth_conv2d_nhwc[n, h, w, c] = T.float32(0)
                        depth_conv2d_nhwc[n, h, w, c] = depth_conv2d_nhwc[n, h, w, c] + PadInput[n, h + rh, w + rw, c] * placeholder_1[0, rh, rw, c]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 4, 2, 14]),
("SamplePerfectTile", [1, 4, 7, 4]),
("SamplePerfectTile", [1, 8, 2, 2]),
("SamplePerfectTile", [1, 3]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 2),
("SampleComputeLocation", -1),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 4, 2, 14]),
("SamplePerfectTile", [1, 4, 7, 4]),
("SamplePerfectTile", [1, 8, 2, 2]),
("SamplePerfectTile", [1, 3]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 1),
("SampleComputeLocation", -1),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 4, 2, 14]),
("SamplePerfectTile", [1, 4, 7, 4]),
("SamplePerfectTile", [1, 8, 2, 2]),
("SamplePerfectTile", [1, 3]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 0),
("SampleComputeLocation", 5),
]
mod = create_te_workload("DEP", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[dep_0, dep_1, dep_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_dil():
    # Design-space test for the dilated conv2d (DIL) workload on CPU.
    @T.prim_func
    def dil_0(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 109, 109, 64), "float32"]) -> None:
        # Expected sketch #0: PadInput computed at loop i3_1; results go
        # through a cache_write ("global") buffer.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
            conv2d_nhwc_global = T.alloc_buffer([1, 109, 109, 64], dtype="float32")
            for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1 in T.grid(1, 109, 1, 4, 1, 1, 1, 2):
                for ax0, ax1, ax2, ax3 in T.grid(1, 13, 229, 3):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(230, i1_0 * 2 + ax1)
                        i2 = T.axis.spatial(230, ax2)
                        i3 = T.axis.spatial(3, ax3)
                        T.reads(inputs[i0, i1 - 3, i2 - 3, i3])
                        T.writes(PadInput[i0, i1, i2, i3])
                        PadInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, inputs[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
                for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(7, 1, 1, 1, 1, 109, 8, 1, 7, 3, 1, 1, 1, 1):
                    with T.block("conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                        h = T.axis.spatial(109, i1_2 + i1_3 + i1_0 + i1_1)
                        w = T.axis.spatial(109, i2_3 + i2_0 * 109 + i2_1 * 109 + i2_2)
                        co = T.axis.spatial(64, i3_0 * 16 + i3_1 * 8 + i3_2 + i3_3)
                        rh = T.axis.reduce(7, i4_1 + i4_0)
                        rw = T.axis.reduce(7, i5_0 * 7 + i5_1)
                        rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
                        # NOTE(review): the read/update lines below were truncated in the
                        # source; reconstructed for the dilation-2 conv pattern — confirm.
                        T.reads(PadInput[n, h * 2 + rh * 2, w * 2 + rw * 2, rc], weight[rh, rw, rc, co])
                        T.writes(conv2d_nhwc_global[n, h, w, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            conv2d_nhwc_global[n, h, w, co] = T.float32(0)
                        conv2d_nhwc_global[n, h, w, co] = conv2d_nhwc_global[n, h, w, co] + PadInput[n, h * 2 + rh * 2, w * 2 + rw * 2, rc] * weight[rh, rw, rc, co]
                for ax0, ax1, ax2, ax3 in T.grid(1, 1, 109, 8):
                    with T.block("conv2d_nhwc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(109, i1_0 + ax1)
                        v2 = T.axis.spatial(109, ax2)
                        v3 = T.axis.spatial(64, i3_0 * 16 + i3_1 * 8 + ax3)
                        T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                        T.writes(conv2d_nhwc[v0, v1, v2, v3])
                        conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
@T.prim_func
def dil_1(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 109, 109, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
conv2d_nhwc_global = T.alloc_buffer([1, 109, 109, 64], dtype="float32")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 109, 1, 4):
for i0_1, i1_1, i2_1, i3_1, i4_0 in T.grid(1, 1, 1, 2, 7):
for ax0, ax1, ax2, ax3 in T.grid(1, 1, 229, 3):
with T.block("PadInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(230, i1_0 * 2 + i4_0 * 2 + ax1)
i2 = T.axis.spatial(230, ax2)
i3 |
= T.axis.spatial(3, ax3)
T.reads(inputs[i0, i1 - 3, i2 - 3, i3])
T.writes(PadInput[i0, i1, i2, i3])
PadInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, inputs[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
for i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 1, 1, 109, 8, 1, 7, 3, 1, 1, 1, 1):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
h = T.axis.spatial(109, i1_2 + i1_3 + i1_0 + i1_1)
w = T.axis.spatial(109, i2_3 + i2_0 * 109 + i2_1 * 109 + i2_2)
co = T.axis.spatial(64, i3_0 * 16 + i3_1 * 8 + i3_2 + i3_3)
rh = T.axis.reduce(7, i4_1 + i4_0)
rw = T.axis.reduce(7, i5_0 * 7 + i5_1)
rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
T.reads(PadInput[n, h * 2 + rh * 2, w * 2 + rw * 2, co
T.writes(conv2d_nhwc_global[n, h, w, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
conv2d_nhwc_global[n, h, w, co] = T.float32(0)
conv2d_nhwc_global[n, h, w, co] = conv2d_nhwc_global[n, h, w, co] + PadInput[n, h * 2 + rh * 2, w * 2 + rw * 2, co
for ax0, ax1, ax2, ax3 in T.grid(1, 1, 109, 16):
with T.block("conv2d_nhwc_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(109, i1_0 + ax1)
v2 = T.axis.spatial(109, ax2)
v3 = T.axis.spatial(64, i3_0 * 16 + ax3)
T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
T.writes(conv2d_nhwc[v |
0, v1, v2, v3])
conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
@T.prim_func
def dil_2(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 109, 109, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0_0, i1_0 in T.grid(1, 109):
for ax0, ax1, ax2, ax3 in T.grid(1, 13, 229, 3):
with T.block("PadInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(230, i1_0 * 2 + ax1)
i2 = T.axis.spatial(230, ax2)
i3 = T.axis.spatial(3, ax3)
T.reads(inputs[i0, i1 - 3, i2 - 3, i3])
T.writes(PadInput[i0, i1, i2, i3])
PadInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, inputs[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
for i2_0, i3_0, i0_1, i1_1, i2_1, i3_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 4, 1, 1, 1, 2, 7, 1, 1, 1, 1, 109, 8, 1, 7, 3, 1, 1, 1, 1):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
h = T.axis.spatial(109, i1_2 + i1_3 + i1_0 + i1_1)
w = T.axis.spatial(109, i2_3 + i2_0 * 109 + i2_1 * 109 + i2_2)
co = T.axis.spatial(64, i3_0 * 16 + i3_1 * 8 + i3_2 + i3_3)
rh = T.axis.reduce(7, i4_1 + i4_0)
rw = T.axis.reduce(7, i5_0 * 7 + i5_1) |
rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
T.reads(PadInput[n, h * 2 + rh * 2, w * 2 + rw * 2, co
T.writes(conv2d_nhwc[n, h, w, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
conv2d_nhwc[n, h, w, co] = T.float32(0)
conv2d_nhwc[n, h, w, co] = conv2d_nhwc[n, h, w, co] + PadInput[n, h * 2 + rh * 2, w * 2 + rw * 2, co
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [109, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 109, 1]),
("SamplePerfectTile", [4, 2, 8, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [1, 7]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 2),
("SampleComputeLocation", 7),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [109, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 109, 1]),
("SamplePerfectTile", [4, 2, 8, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [1, 7]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 0),
("SampleComputeLocation", 8),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [109, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 109, 1]),
("SamplePerfectTile", [4, 2, 8, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [1, 7]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 0),
("SampleComputeLocation", 1),
]
mod = create_te_workload("DIL", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[dil_0, dil_1, dil_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_gmm():
    # Design-space test for batched matmul (GMM) on CPU.
    @T.prim_func
    def gmm_0(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
        # Expected sketch #0: cache_write buffer with copy-back at the inner
        # spatial tile loops.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            Z_global = T.alloc_buffer([1, 128, 128], dtype="float32")
            for i0_0, i1_0, i2_0, i0_1, i1_1, i2_1 in T.grid(1, 4, 2, 1, 1, 8):
                for i3_0, i0_2, i1_2, i2_2, i3_1, i0_3, i1_3, i2_3 in T.grid(128, 1, 16, 1, 1, 1, 2, 8):
                    with T.block("Z"):
                        b = T.axis.spatial(1, i0_0 + i0_1 + i0_2 + i0_3)
                        i = T.axis.spatial(128, i1_0 * 32 + i1_1 * 32 + i1_2 * 2 + i1_3)
                        j = T.axis.spatial(128, i2_0 * 64 + i2_1 * 8 + i2_2 * 8 + i2_3)
                        k = T.axis.reduce(128, i3_1 + i3_0)
                        T.reads(X[b, i, k], Y[b, k, j])
                        T.writes(Z_global[b, i, j])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            Z_global[b, i, j] = T.float32(0)
                        Z_global[b, i, j] = Z_global[b, i, j] + X[b, i, k] * Y[b, k, j]
                for ax0, ax1, ax2 in T.grid(1, 32, 8):
                    with T.block("Z_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(128, i1_0 * 32 + ax1)
                        v2 = T.axis.spatial(128, i2_0 * 64 + i2_1 * 8 + ax2)
                        T.reads(Z_global[v0, v1, v2])
                        T.writes(Z[v0, v1, v2])
                        Z[v0, v1, v2] = Z_global[v0, v1, v2]
    @T.prim_func
    def gmm_1(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
        # Expected sketch #1: same tiling as gmm_0 but the cache_write
        # copy-back is hoisted to the outermost tile loops.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            Z_global = T.alloc_buffer([1, 128, 128], dtype="float32")
            for i0_0, i1_0, i2_0 in T.grid(1, 4, 2):
                for i0_1, i1_1, i2_1, i3_0, i0_2, i1_2, i2_2, i3_1, i0_3, i1_3, i2_3 in T.grid(1, 1, 8, 128, 1, 16, 1, 1, 1, 2, 8):
                    with T.block("Z"):
                        b = T.axis.spatial(1, i0_0 + i0_1 + i0_2 + i0_3)
                        i = T.axis.spatial(128, i1_0 * 32 + i1_1 * 32 + i1_2 * 2 + i1_3)
                        j = T.axis.spatial(128, i2_0 * 64 + i2_1 * 8 + i2_2 * 8 + i2_3)
                        k = T.axis.reduce(128, i3_1 + i3_0)
                        T.reads(X[b, i, k], Y[b, k, j])
                        T.writes(Z_global[b, i, j])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            Z_global[b, i, j] = T.float32(0)
                        Z_global[b, i, j] = Z_global[b, i, j] + X[b, i, k] * Y[b, k, j]
                for ax0, ax1, ax2 in T.grid(1, 32, 64):
                    with T.block("Z_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(128, i1_0 * 32 + ax1)
                        v2 = T.axis.spatial(128, i2_0 * 64 + ax2)
                        T.reads(Z_global[v0, v1, v2])
                        T.writes(Z[v0, v1, v2])
                        Z[v0, v1, v2] = Z_global[v0, v1, v2]
@T.prim_func
def gmm_2(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedul |
e.vectorize":64})
for i0_0, i1_0, i2_0, i0_1, i1_1, i2_1, i3_0, i0_2, i1_2, i2_2, i3_1, i0_3, i1_3, i2_3 in T.grid(1, 4, 2, 1, 1, 8, 128, 1, 16, 1, 1, 1, 2, 8):
with T.block("Z"):
b = T.axis.spatial(1, i0_0 + i0_1 + i0_2 + i0_3)
i = T.axis.spatial(128, i1_0 * 32 + i1_1 * 32 + i1_2 * 2 + i1_3)
j = T.axis.spatial(128, i2_0 * 64 + i2_1 * 8 + i2_2 * 8 + i2_3)
k = T.axis.reduce(128, i3_1 + i3_0)
T.reads(X[b, i, k], Y[b, k, j])
T.writes(Z[b, i, j])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
Z[b, i, j] = T.float32(0)
Z[b, i, j] = Z[b, i, j] + X[b, i, k] * Y[b, k, j]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [4, 1, 16, 2]),
("SamplePerfectTile", [2, 8, 1, 8]),
("SamplePerfectTile", [128, 1]),
("SampleCategorical", 1),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [4, 1, 16, 2]),
("SamplePerfectTile", [2, 8, 1, 8]),
("SamplePerfectTile", [128, 1]),
("SampleCategorical", 1),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [4, 1, 16, 2]),
("SamplePerfectTile", [2, 8, 1, 8]),
("SamplePerfectTile", [128, 1]),
("SampleCategorical", 1),
]
mod = create_te_workload("GMM", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[gmm_0, gmm_1, gmm_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_grp():
# NOTE(review): golden expected sketch for the "GRP" (grouped conv2d NHWC) workload.
# Extraction damage in this block: the func_attr string is split mid-token
# ("mai / n"), the T.writes line is wrapped ("[n / , h, w, co])"), and the
# T.reads(...) and reduction-update lines are truncated at "co" — the missing
# operands (e.g. the weight[rh, rw, rc, co] factor) must be restored from the
# upstream TVM test file before this parses.
@T.prim_func
def grp_0(inputs: T.Buffer[(1, 56, 56, 64), "float32"], weight: T.Buffer[(3, 3, 16, 128), "float32"], conv2d_nhwc: T.Buffer[(1, 28, 28, 128), "float32"]) -> None:
        T.func_attr({"global_symbol": "mai
n", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
            conv2d_nhwc_global = T.alloc_buffer([1, 28, 28, 128], dtype="float32")
            for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 7, 1, 2):
                for ax0, ax1, ax2, ax3 in T.grid(1, 9, 57, 32):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(58, i1_0 * 8 + ax1)
                        i2 = T.axis.spatial(58, ax2)
                        i3 = T.axis.spatial(64, i3_0 * 32 + ax3)
                        T.reads(inputs[i0, i1 - 1, i2 - 1, i3])
                        T.writes(PadInput[i0, i1, i2, i3])
                        PadInput[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 57 and 1 <= i2 and i2 < 57, inputs[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
                for i0_1, i1_1, i2_1, i3_1 in T.grid(1, 4, 1, 1):
                    for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 3, 8, 1, 1, 4, 4, 3, 1, 2, 1, 1, 7, 16):
                        with T.block("conv2d_nhwc"):
                            n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                            h = T.axis.spatial(28, i1_0 * 4 + i1_1 + i1_2 + i1_3)
                            w = T.axis.spatial(28, i2_0 * 28 + i2_1 * 28 + i2_2 * 7 + i2_3)
                            co = T.axis.spatial(128, i3_0 * 64 + i3_1 * 64 + i3_2 * 16 + i3_3)
                            rh = T.axis.reduce(3, i4_0 * 3 + i4_1)
                            rw = T.axis.reduce(3, i5_0 + i5_1)
                            rc = T.axis.reduce(16, i6_0 * 2 + i6_1)
                            T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, co
                            T.writes(conv2d_nhwc_global[n
, h, w, co])
                            T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                            with T.init():
                                conv2d_nhwc_global[n, h, w, co] = T.float32(0)
                            conv2d_nhwc_global[n, h, w, co] = conv2d_nhwc_global[n, h, w, co] + PadInput[n, h * 2 + rh, w * 2 + rw, co
                    for ax0, ax1, ax2, ax3 in T.grid(1, 1, 28, 64):
                        with T.block("conv2d_nhwc_global"):
                            v0 = T.axis.spatial(1, ax0)
                            v1 = T.axis.spatial(28, i1_0 * 4 + i1_1 + ax1)
                            v2 = T.axis.spatial(28, ax2)
                            v3 = T.axis.spatial(128, i3_0 * 64 + ax3)
                            T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                            T.writes(conv2d_nhwc[v0, v1, v2, v3])
                            conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
# NOTE(review): second golden GRP sketch (padding computed at root, unroll 512).
# Extraction damage: the PadInput assignment is wrapped mid-expression
# ("inputs[i0_1, / i1_1 - 1, ..."), and the T.reads(...) and reduction-update
# lines are truncated at "co" — restore the missing weight[...] operand from the
# upstream TVM test file before this parses.
@T.prim_func
def grp_1(inputs: T.Buffer[(1, 56, 56, 64), "float32"], weight: T.Buffer[(3, 3, 16, 128), "float32"], conv2d_nhwc: T.Buffer[(1, 28, 28, 128), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
            conv2d_nhwc_global = T.alloc_buffer([1, 28, 28, 128], dtype="float32")
            for i0, i1, i2, i3 in T.grid(1, 58, 58, 64):
                with T.block("PadInput"):
                    i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
                    T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
                    PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 57 and 1 <= i2_1 and i2_1 < 57, inputs[i0_1,
i1_1 - 1, i2_1 - 1, i3_1], T.float32(0), dtype="float32")
            for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 7, 1, 2):
                for i0_1_1, i1_1_1, i2_1_1, i3_1_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 4, 1, 1, 1, 3, 8, 1, 1, 4, 4, 3, 1, 2, 1, 1, 7, 16):
                    with T.block("conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_3 + i0_0 + i0_1_1 + i0_2)
                        h = T.axis.spatial(28, i1_0 * 4 + i1_1_1 + i1_2 + i1_3)
                        w = T.axis.spatial(28, i2_0 * 28 + i2_1_1 * 28 + i2_2 * 7 + i2_3)
                        co = T.axis.spatial(128, i3_0 * 64 + i3_1_1 * 64 + i3_2 * 16 + i3_3)
                        rh = T.axis.reduce(3, i4_0 * 3 + i4_1)
                        rw = T.axis.reduce(3, i5_0 + i5_1)
                        rc = T.axis.reduce(16, i6_0 * 2 + i6_1)
                        T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, co
                        T.writes(conv2d_nhwc_global[n, h, w, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            conv2d_nhwc_global[n, h, w, co] = T.float32(0)
                        conv2d_nhwc_global[n, h, w, co] = conv2d_nhwc_global[n, h, w, co] + PadInput[n, h * 2 + rh, w * 2 + rw, co
                for ax0, ax1, ax2, ax3 in T.grid(1, 4, 28, 64):
                    with T.block("conv2d_nhwc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(28, i1_0 * 4 + ax1)
                        v2 = T.axis.spatial(28, ax2)
                        v3 = T.axis.spatial(128, i3_0 * 64 + ax3)
                        T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                        T.writes(conv2d_nhwc[v0, v1, v2, v3])
                        conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
# NOTE(review): third golden GRP sketch (no cache_write; output computed in place).
# Extraction damage: the def line is split mid-token ("fl / oat32"), and the
# T.reads(...) and reduction-update lines are truncated at "co" — restore the
# missing weight[...] operand from the upstream TVM test file before this parses.
@T.prim_func
def grp_2(inputs: T.Buffer[(1, 56, 56, 64), "float32"], weight: T.Buffer[(3, 3, 16, 128), "fl
oat32"], conv2d_nhwc: T.Buffer[(1, 28, 28, 128), "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
            for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1, i4_0, i5_0 in T.grid(1, 7, 1, 2, 1, 4, 1, 1, 1, 3):
                for ax0, ax1, ax2, ax3 in T.grid(1, 3, 55, 32):
                    with T.block("PadInput"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(58, i1_0 * 8 + i1_1 * 2 + ax1)
                        i2 = T.axis.spatial(58, i5_0 + ax2)
                        i3 = T.axis.spatial(64, i3_0 * 32 + ax3)
                        T.reads(inputs[i0, i1 - 1, i2 - 1, i3])
                        T.writes(PadInput[i0, i1, i2, i3])
                        PadInput[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 57 and 1 <= i2 and i2 < 57, inputs[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
                for i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(8, 1, 1, 4, 4, 3, 1, 2, 1, 1, 7, 16):
                    with T.block("conv2d_nhwc"):
                        n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                        h = T.axis.spatial(28, i1_0 * 4 + i1_1 + i1_2 + i1_3)
                        w = T.axis.spatial(28, i2_0 * 28 + i2_1 * 28 + i2_2 * 7 + i2_3)
                        co = T.axis.spatial(128, i3_0 * 64 + i3_1 * 64 + i3_2 * 16 + i3_3)
                        rh = T.axis.reduce(3, i4_0 * 3 + i4_1)
                        rw = T.axis.reduce(3, i5_0 + i5_1)
                        rc = T.axis.reduce(16, i6_0 * 2 + i6_1)
                        T.reads(PadInput[n, h * 2 + rh, w * 2 + rw, co
                        T.writes(conv2d_nhwc[n, h, w, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                        with T.init():
                            conv2d_nhwc[n, h, w, co] = T.float32(0)
                        conv2d_nhwc[n, h, w, co] = conv2d_nhwc[n, h, w, co] + PadInput[n, h * 2 + rh, w * 2 + rw, co
# Recorded sampling decisions for the GRP workload, paired one-to-one with the
# expected sketches grp_0/grp_1/grp_2. Generated traces — regenerate rather than
# hand-tune if the search space changes.
decision_0 = [
    ("SamplePerfectTile", [1, 1, 1, 1]),
    ("SamplePerfectTile", [7, 4, 1, 1]),
    ("SamplePerfectTile", [1, 1, 4, 7]),
    ("SamplePerfectTile", [2, 1, 4, 16]),
    ("SamplePerfectTile", [1, 3]),
    ("SamplePerfectTile", [3, 1]),
    ("SamplePerfectTile", [8, 2]),
    ("SampleCategorical", 1),
    ("SampleComputeLocation", 3),
]
decision_1 = [
    ("SamplePerfectTile", [1, 1, 1, 1]),
    ("SamplePerfectTile", [7, 4, 1, 1]),
    ("SamplePerfectTile", [1, 1, 4, 7]),
    ("SamplePerfectTile", [2, 1, 4, 16]),
    ("SamplePerfectTile", [1, 3]),
    ("SamplePerfectTile", [3, 1]),
    ("SamplePerfectTile", [8, 2]),
    ("SampleCategorical", 3),
    ("SampleComputeLocation", -1),
]
decision_2 = [
    ("SamplePerfectTile", [1, 1, 1, 1]),
    ("SamplePerfectTile", [7, 4, 1, 1]),
    ("SamplePerfectTile", [1, 1, 4, 7]),
    ("SamplePerfectTile", [2, 1, 4, 16]),
    ("SamplePerfectTile", [1, 3]),
    ("SamplePerfectTile", [3, 1]),
    ("SamplePerfectTile", [8, 2]),
    ("SampleCategorical", 1),
    ("SampleComputeLocation", 9),
]
mod = create_te_workload("GRP", 0)
actual = _design_space(mod)
check_sketches(
    mod,
    sketches=actual,
    expected_mods=[grp_0, grp_1, grp_2],
    expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_t2d():
# NOTE(review): golden expected sketch for the "T2D" (transposed conv2d NHWC)
# workload. Extraction damage: "with / T.block(\"root\"):" is split across a row
# boundary, and the T.reads(...) and reduction-update lines are truncated at
# "(h + rh)" — the stride-2 index arithmetic and weight[...] operand must be
# restored from the upstream TVM test file before this parses.
@T.prim_func
def t2d_0(inputs: T.Buffer[(1, 4, 4, 512), "float32"], weight: T.Buffer[(4, 4, 512, 256), "float32"], conv2d_transpose_nhwc: T.Buffer[(1, 8, 8, 256), "float32"]) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    with
T.block("root"):
        T.reads()
        T.writes()
        T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
        PadInput = T.alloc_buffer([1, 6, 6, 512], dtype="float32")
        conv2d_transpose_nhwc_global = T.alloc_buffer([1, 8, 8, 256], dtype="float32")
        for i0, i1, i2, i3 in T.grid(1, 6, 6, 512):
            with T.block("PadInput"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
                T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
                PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 5 and 1 <= i2_1 and i2_1 < 5, inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1], T.float32(0), dtype="float32")
        for i0_0, i1_0, i2_0, i3_0, i0_1_1, i1_1_1, i2_1_1, i3_1_1 in T.grid(1, 1, 2, 8, 1, 4, 1, 4):
            for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(2, 2, 64, 1, 1, 1, 1, 2, 2, 8, 1, 2, 4, 8):
                with T.block("conv2d_transpose_nhwc"):
                    n = T.axis.spatial(1, i0_3 + i0_0 + i0_1_1 + i0_2)
                    h = T.axis.spatial(8, i1_0 * 8 + i1_1_1 * 2 + i1_2 * 2 + i1_3)
                    w = T.axis.spatial(8, i2_0 * 4 + i2_1_1 * 4 + i2_2 * 4 + i2_3)
                    co = T.axis.spatial(256, i3_0 * 32 + i3_1_1 * 8 + i3_2 * 8 + i3_3)
                    rh = T.axis.reduce(4, i4_0 * 2 + i4_1)
                    rw = T.axis.reduce(4, i5_0 * 2 + i5_1)
                    rc = T.axis.reduce(512, i6_0 * 8 + i6_1)
                    T.reads(PadInput[n, (h + rh)
                    T.writes(conv2d_transpose_nhwc_global[n, h, w, co])
                    T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                    with T.init():
                        conv2d_transpose_nhwc_global[n, h, w, co] = T.float32(0)
                    conv2d_transpose_nhwc_global[n, h, w, co] = conv2d_transpose_nhwc_global[n, h, w, co] + T.if_then_else((h + rh) % 2 == 0 and (w + rw) % 2 == 0, PadInput[n, (h + rh)
            for ax0, ax1, ax2, ax3 in T.grid(1, 2, 4, 8):
                with T.block("conv2d_transpose_nhwc_global"):
                    v0 = T.axis.spatial(1, ax0)
                    v1 = T.axis.spatial(8, i1_1_1 * 2 + ax1)
                    v2 = T.axis.spatial(8, i2_0 * 4 + ax2)
                    v3 = T.axis.spatial(256, i3_0 * 32 + i3_1_1 * 8 + ax3)
                    T.reads(conv2d_transpose_nhwc_global[v0, v1, v2, v3])
                    T.writes(conv2d_transpose_nhwc[v0, v1, v2, v3])
                    conv2d_transpose_nhwc[v0, v1, v2, v3] = conv2d_transpose_nhwc_global[v0, v1, v2, v3]
# NOTE(review): second golden T2D sketch (padding computed at the i3_0 loop).
# Extraction damage: the PadInput assignment is wrapped mid-expression
# ("i2 / < 5, ..."), and the T.reads(...) and reduction-update lines are
# truncated at "(h + rh)" — restore the missing text from the upstream TVM test
# file before this parses.
@T.prim_func
def t2d_1(inputs: T.Buffer[(1, 4, 4, 512), "float32"], weight: T.Buffer[(4, 4, 512, 256), "float32"], conv2d_transpose_nhwc: T.Buffer[(1, 8, 8, 256), "float32"]) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    with T.block("root"):
        T.reads()
        T.writes()
        T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
        PadInput = T.alloc_buffer([1, 6, 6, 512], dtype="float32")
        conv2d_transpose_nhwc_global = T.alloc_buffer([1, 8, 8, 256], dtype="float32")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 1, 2, 8):
            for ax0, ax1, ax2, ax3 in T.grid(1, 6, 4, 512):
                with T.block("PadInput"):
                    i0, i1 = T.axis.remap("SS", [ax0, ax1])
                    i2 = T.axis.spatial(6, i2_0 * 2 + ax2)
                    i3 = T.axis.spatial(512, ax3)
                    T.reads(inputs[i0, i1 - 1, i2 - 1, i3])
                    T.writes(PadInput[i0, i1, i2, i3])
                    PadInput[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 5 and 1 <= i2 and i2
< 5, inputs[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
            for i0_1, i1_1, i2_1, i3_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 4, 1, 4, 2, 2, 64, 1, 1, 1, 1, 2, 2, 8, 1, 2, 4, 8):
                with T.block("conv2d_transpose_nhwc"):
                    n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                    h = T.axis.spatial(8, i1_0 * 8 + i1_1 * 2 + i1_2 * 2 + i1_3)
                    w = T.axis.spatial(8, i2_0 * 4 + i2_1 * 4 + i2_2 * 4 + i2_3)
                    co = T.axis.spatial(256, i3_0 * 32 + i3_1 * 8 + i3_2 * 8 + i3_3)
                    rh = T.axis.reduce(4, i4_0 * 2 + i4_1)
                    rw = T.axis.reduce(4, i5_0 * 2 + i5_1)
                    rc = T.axis.reduce(512, i6_0 * 8 + i6_1)
                    T.reads(PadInput[n, (h + rh)
                    T.writes(conv2d_transpose_nhwc_global[n, h, w, co])
                    T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                    with T.init():
                        conv2d_transpose_nhwc_global[n, h, w, co] = T.float32(0)
                    conv2d_transpose_nhwc_global[n, h, w, co] = conv2d_transpose_nhwc_global[n, h, w, co] + T.if_then_else((h + rh) % 2 == 0 and (w + rw) % 2 == 0, PadInput[n, (h + rh)
            for ax0, ax1, ax2, ax3 in T.grid(1, 8, 4, 32):
                with T.block("conv2d_transpose_nhwc_global"):
                    v0, v1 = T.axis.remap("SS", [ax0, ax1])
                    v2 = T.axis.spatial(8, i2_0 * 4 + ax2)
                    v3 = T.axis.spatial(256, i3_0 * 32 + ax3)
                    T.reads(conv2d_transpose_nhwc_global[v0, v1, v2, v3])
                    T.writes(conv2d_transpose_nhwc[v0, v1, v2, v3])
                    conv2d_transpose_nhwc[v0, v1, v2, v3] = conv2d_transpose_nhwc_global[v0, v1, v2, v3]
# NOTE(review): third golden T2D sketch (padding inlined; output computed in
# place). Extraction damage: the def line is split mid-expression ("(4, 4 /
# , 512, 256)"), and the T.reads(...) and final update lines are truncated at
# "(h + rh)" — restore the missing inlined-padding expression and weight[...]
# operand from the upstream TVM test file before this parses.
@T.prim_func
def t2d_2(inputs: T.Buffer[(1, 4, 4, 512), "float32"], weight: T.Buffer[(4, 4
, 512, 256), "float32"], conv2d_transpose_nhwc: T.Buffer[(1, 8, 8, 256), "float32"]) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    with T.block("root"):
        T.reads()
        T.writes()
        T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
        for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 2, 8, 1, 4, 1, 4, 2, 2, 64, 1, 1, 1, 1, 2, 2, 8, 1, 2, 4, 8):
            with T.block("conv2d_transpose_nhwc"):
                n = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
                h = T.axis.spatial(8, i1_0 * 8 + i1_1 * 2 + i1_2 * 2 + i1_3)
                w = T.axis.spatial(8, i2_0 * 4 + i2_1 * 4 + i2_2 * 4 + i2_3)
                co = T.axis.spatial(256, i3_0 * 32 + i3_1 * 8 + i3_2 * 8 + i3_3)
                rh = T.axis.reduce(4, i4_0 * 2 + i4_1)
                rw = T.axis.reduce(4, i5_0 * 2 + i5_1)
                rc = T.axis.reduce(512, i6_0 * 8 + i6_1)
                T.reads(inputs[n, (h + rh)
                T.writes(conv2d_transpose_nhwc[n, h, w, co])
                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                with T.init():
                    conv2d_transpose_nhwc[n, h, w, co] = T.float32(0)
                conv2d_transpose_nhwc[n, h, w, co] = conv2d_transpose_nhwc[n, h, w, co] + T.if_then_else((h + rh) % 2 == 0 and (w + rw) % 2 == 0, T.if_then_else(1 <= (h + rh)
# Recorded sampling decisions for the T2D workload, paired one-to-one with
# t2d_0/t2d_1/t2d_2. Note check_sketches is called with debug_mask=0 here.
# The "decision_ / 1 = [" split below is an extraction row-boundary artifact.
decision_0 = [
    ("SamplePerfectTile", [1, 1, 1, 1]),
    ("SamplePerfectTile", [1, 4, 1, 2]),
    ("SamplePerfectTile", [2, 1, 1, 4]),
    ("SamplePerfectTile", [8, 4, 1, 8]),
    ("SamplePerfectTile", [2, 2]),
    ("SamplePerfectTile", [2, 2]),
    ("SamplePerfectTile", [64, 8]),
    ("SampleCategorical", 2),
    ("SampleComputeLocation", -1),
]
decision_
1 = [
    ("SamplePerfectTile", [1, 1, 1, 1]),
    ("SamplePerfectTile", [1, 4, 1, 2]),
    ("SamplePerfectTile", [2, 1, 1, 4]),
    ("SamplePerfectTile", [8, 4, 1, 8]),
    ("SamplePerfectTile", [2, 2]),
    ("SamplePerfectTile", [2, 2]),
    ("SamplePerfectTile", [64, 8]),
    ("SampleCategorical", 2),
    ("SampleComputeLocation", 3),
]
decision_2 = [
    ("SamplePerfectTile", [1, 1, 1, 1]),
    ("SamplePerfectTile", [1, 4, 1, 2]),
    ("SamplePerfectTile", [2, 1, 1, 4]),
    ("SamplePerfectTile", [8, 4, 1, 8]),
    ("SamplePerfectTile", [2, 2]),
    ("SamplePerfectTile", [2, 2]),
    ("SamplePerfectTile", [64, 8]),
    ("SampleCategorical", 3),
    ("SampleComputeLocation", -2),
]
mod = create_te_workload("T2D", 0)
actual = _design_space(mod)
check_sketches(
    mod,
    sketches=actual,
    expected_mods=[t2d_0, t2d_1, t2d_2],
    expected_decisions=[decision_0, decision_1, decision_2],
    debug_mask=0,
)
def test_cpu_nrm():
# NOTE(review): golden expected sketch for the "NRM" (vector 2-norm) workload
# using rfactor over the fused (i1, i2) axis (32768 partial sums). Extraction
# damage: the update statement is split ("C_rf / [b, ...") and the T.reads(...)
# / update lines are truncated after "(vi1_i2_fused_0 * 2 + vi1_i2_fused_1" —
# the remaining index arithmetic must be restored from the upstream TVM test.
@T.prim_func
def nrm_0(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[1, "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
            C = T.alloc_buffer([1], dtype="float32")
            C_rf = T.alloc_buffer([1, 32768], dtype="float32")
            for i0, i1_i2_fused_0, i1_i2_fused_1 in T.grid(1, 32768, 2):
                with T.block("C_rf"):
                    vi1_i2_fused_0, b, vi1_i2_fused_1 = T.axis.remap("SSR", [i1_i2_fused_0, i0, i1_i2_fused_1])
                    T.reads(A[b, (vi1_i2_fused_0 * 2 + vi1_i2_fused_1)
                    T.writes(C_rf[b, vi1_i2_fused_0])
                    with T.init():
                        C_rf[b, vi1_i2_fused_0] = T.float32(0)
                    C_rf
[b, vi1_i2_fused_0] = C_rf[b, vi1_i2_fused_0] + A[b, (vi1_i2_fused_0 * 2 + vi1_i2_fused_1)
            for i0, i1_i2_fused_0 in T.grid(1, 32768):
                with T.block("C"):
                    vi1_i2_fused_0, b = T.axis.remap("RS", [i1_i2_fused_0, i0])
                    T.reads(C_rf[b, vi1_i2_fused_0])
                    T.writes(C[b])
                    with T.init():
                        C[b] = T.float32(0)
                    C[b] = C[b] + C_rf[b, vi1_i2_fused_0]
            for i0 in T.serial(1):
                with T.block("D"):
                    b = T.axis.spatial(1, i0)
                    T.reads(C[b])
                    T.writes(D[b])
                    D[b] = T.sqrt(C[b], dtype="float32")
# NOTE(review): second golden NRM sketch — rfactor on the inner fused factor
# (2 partial sums) instead of the outer one. The T.reads(...) and update lines
# are truncated after "(vi1_i2_fused_0 * 2 + vi1_i2_fused_1" by the extraction;
# restore the remaining index arithmetic from the upstream TVM test file.
@T.prim_func
def nrm_1(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[1, "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
            C = T.alloc_buffer([1], dtype="float32")
            C_rf = T.alloc_buffer([1, 2], dtype="float32")
            for i0, i1_i2_fused_0, i1_i2_fused_1 in T.grid(1, 32768, 2):
                with T.block("C_rf"):
                    vi1_i2_fused_1, b, vi1_i2_fused_0 = T.axis.remap("SSR", [i1_i2_fused_1, i0, i1_i2_fused_0])
                    T.reads(A[b, (vi1_i2_fused_0 * 2 + vi1_i2_fused_1)
                    T.writes(C_rf[b, vi1_i2_fused_1])
                    with T.init():
                        C_rf[b, vi1_i2_fused_1] = T.float32(0)
                    C_rf[b, vi1_i2_fused_1] = C_rf[b, vi1_i2_fused_1] + A[b, (vi1_i2_fused_0 * 2 + vi1_i2_fused_1)
            for i0, i1_i2_fused_1 in T.grid(1, 2):
                with T.block("C"):
                    vi1_i2_fused_1, b = T.axis.remap("RS", [i1_i2_fused_1, i0])
                    T.reads(C_rf[b, vi1_i2_fused_1])
                    T.writes(C[b])
                    with T.init():
                        C[b] = T.float32(0)
                    C[b] = C[b] + C_rf[b, vi1_i2_fused_1]
            for i0 in T.serial(1):
                with T.block("D"):
                    b = T.axis.spatial(1, i0)
                    T.reads(C[b])
                    T.writes(D[b])
                    D[b] = T.sqrt(C[b], dtype="float32")
# NOTE(review): third golden NRM sketch — plain (non-rfactored) reduction of
# sum(A*A) followed by sqrt. This variant survived extraction intact; it is a
# generated expected module compared structurally by check_sketches — do not
# hand-edit.
@T.prim_func
def nrm_2(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[1, "float32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
            C = T.alloc_buffer([1], dtype="float32")
            for i0, i1, i2 in T.grid(1, 256, 256):
                with T.block("C"):
                    b, i, j = T.axis.remap("SRR", [i0, i1, i2])
                    T.reads(A[b, i, j])
                    T.writes(C[b])
                    with T.init():
                        C[b] = T.float32(0)
                    C[b] = C[b] + A[b, i, j] * A[b, i, j]
            for i0 in T.serial(1):
                with T.block("D"):
                    b = T.axis.spatial(1, i0)
                    T.reads(C[b])
                    T.writes(D[b])
                    D[b] = T.sqrt(C[b], dtype="float32")
decision_0 = [
("SamplePerfectTile", [32768, 2]),
("SampleCategorical", 0),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
]
decision_1 = [
("SamplePerfectTile", [32768, 2]),
("SampleCategorical", 1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
]
decision_2 = [
("SampleCategorical", 0),
("SampleComputeLocation", -1),
]
mod = create_te_workload("NRM", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[nrm_0, n |
rm_1, nrm_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_sfm():
@T.prim_func
def sfm_0(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_rf = T.alloc_buffer([256, 16], dtype="float32")
T_softmax_maxelem_rf = T.alloc_buffer([256, 4], dtype="float32")
for i0, i1_0, i1_1 in T.grid(256, 4, 64):
with T.block("T_softmax_maxelem_rf"):
vi1_0, i0_1, vi1_1 = T.axis.remap("SSR", [i1_0, i0, i1_1])
T.reads(A[i0_1, vi1_0 * 64 + vi1_1])
T.writes(T_softmax_maxelem_rf[i0_1, vi1_0])
with T.init():
T_softmax_maxelem_rf[i0_1, vi1_0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_rf[i0_1, vi1_0] = T.max(T_softmax_maxelem_rf[i0_1, vi1_0], A[i0_1, vi1_0 * 64 + vi1_1])
for i0, i1_0 in T.grid(256, 4):
with T.block("T_softmax_maxelem"):
vi1_0, i0_2 = T.axis.remap("RS", [i1_0, i0])
T.reads(T_softmax_maxelem_rf[i0_2, vi1_0])
T.writes(T_softmax_maxelem[i0_2])
with T.init():
T_softmax_maxelem[i0_2] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_2] = T.max(T_softmax_maxelem[i0_2], T_softmax_maxelem_rf[i0_2, vi1_0])
for i0_3, i1_0, i1_1 in T.grid(256, 16, 16):
with T.block("T_softmax_expsum_rf"):
vi1_0, i0_4, vi1_1 = T.axis.re |
# NOTE(review): the three lines below are HuggingFace dataset-viewer UI text
# captured by the extraction ("Subsets and Splits" panel) — they are not part of
# the original test source and should be removed when restoring the file.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.