# NOTE(review): the five lines below are dataset-export residue (a column header
# from the tool that produced this chunk), not Python. Commented out so the
# module remains parseable; the original top-of-file content is missing here.
# text
# stringlengths 1
# 2.05k
# |
# ---|
]) -> None:
T.func_attr({"tir.noalias": True, "global_symbol": "main"})
pad_temp = T.alloc_buffer([16, 56, 56, 64], dtype="int8")
conv2d_nhwc = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
T_subtract = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
T_add = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
compute_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
T_add_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
compute_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
T_subtract_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
for i0, i1, i2, i3 in T.grid(16, 56, 56, 64):
with T.block("pad_temp"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(p0[i0_1, i1_1, i2_1, i3_1])
T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 56, 56, 256, 1, 1, 64):
with T.block("conv2d_nhwc"):
nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
T.writes(conv2d_nhwc[nn, yy, xx, ff])
with T.init():
conv2d_nhwc[nn, yy, xx, ff] = 0
conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
with T.block("T_subtract"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
T.writes(T_subtract[ax0, ax1, ax2, ax3])
T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
for i0, i1, i2, i3 i |
n T.grid(16, 56, 56, 256):
with T.block("T_add"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
T.writes(T_add[ax0, ax1, ax2, ax3])
T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
with T.block("compute"):
i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2])
T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
compute_1[i0_2, i1_2, i2_2, i3_2] = T.q_multiply_shift_per_axis(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2], 31, False, True, dtype="int32")
for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 56, 56, 256):
with T.block("T_add_1"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
T.reads(p7[()], compute_1[ax0, ax1, ax2, ax3])
T.writes(T_add_1[ax0, ax1, ax2, ax3])
T_add_1[ax0, ax1, ax2, ax3] = p7[()] + compute_1[ax0, ax1, ax2, ax3]
for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 56, 56, 256):
with T.block("compute_1"):
i0_5, i1_5, i2_5, i3_5 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
T.reads(T_add_1[i0_5, i1_5, i2_5, i3_5])
T.writes(compute_2[i0_5, i1_5, i2_5, i3_5])
compute_2[i0_5, i1_5, i2_5, i3_5] = T.max(T.min(T_add_1[i0_5, i1_5, i2_5, i3_5], 255), 0)
for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 56, 56, 256):
with T.block("T_subtract_1"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
T.reads(compute_2[ax0, ax1, ax2, ax3], p8[0])
T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
T_subtract_1[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] - p8[0] |
for i0_7, i1_7, i2_7, i3_7 in T.grid(16, 56, 56, 256):
with T.block("compute_2"):
i0_8, i1_8, i2_8, i3_8 = T.axis.remap("SSSS", [i0_7, i1_7, i2_7, i3_7])
T.reads(T_subtract_1[i0_8, i1_8, i2_8, i3_8])
T.writes(compute[i0_8, i1_8, i2_8, i3_8])
compute[i0_8, i1_8, i2_8, i3_8] = T.q_multiply_shift(T_subtract_1[i0_8, i1_8, i2_8, i3_8], 1963325822, 31, 1, dtype="int32")
@tvm.script.ir_module
class Conv2dInt8_with_predicate_target:
    # Expected (unscheduled) TIR for the int8 NHWC 1x1 convolution with its
    # quantized epilogue: conv -> subtract(p2) -> add(p3) -> per-axis
    # q_multiply_shift(p4..p6) -> add scalar p7 -> clip to [0, 255] ->
    # subtract p8 -> q_multiply_shift -> same chain applied to the residual
    # input p9 -> add -> final clip.  Fix applied: rejoined lines that were
    # split mid-token by chunking artifacts and restored indentation.
    @T.prim_func
    def main(p0: T.Buffer[(16, 56, 56, 64), "int8"], p1: T.Buffer[(256, 1, 1, 64), "int8"], p2: T.Buffer[(1, 1, 1, 256), "int32"], p3: T.Buffer[(1, 1, 1, 256), "int32"], p4: T.Buffer[256, "int32"], p5: T.Buffer[256, "int32"], p6: T.Buffer[256, "int32"], p7: T.Buffer[(), "int32"], p8: T.Buffer[1, "int32"], p9: T.Buffer[(16, 56, 56, 256), "int32"], compute: T.Buffer[(16, 56, 56, 256), "int32"]) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        pad_temp = T.alloc_buffer([16, 56, 56, 64], dtype="int8")
        conv2d_nhwc = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_4 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 64):
            with T.block("pad_temp"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1, i2_1, i3_1])
                T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
                pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
        for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 56, 56, 256, 1, 1, 64):
            with T.block("conv2d_nhwc"):
                nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
                T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
                T.writes(conv2d_nhwc[nn, yy, xx, ff])
                with T.init():
                    conv2d_nhwc[nn, yy, xx, ff] = 0
                conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
                T.writes(T_subtract[ax0, ax1, ax2, ax3])
                T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("compute"):
                i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2])
                T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
                compute_1[i0_2, i1_2, i2_2, i3_2] = T.q_multiply_shift_per_axis(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2], 31, False, True, dtype="int32")
        for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
                T.reads(p7[()], compute_1[ax0, ax1, ax2, ax3])
                T.writes(T_add_1[ax0, ax1, ax2, ax3])
                T_add_1[ax0, ax1, ax2, ax3] = p7[()] + compute_1[ax0, ax1, ax2, ax3]
        for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 56, 56, 256):
            with T.block("compute_1"):
                i0_5, i1_5, i2_5, i3_5 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
                T.reads(T_add_1[i0_5, i1_5, i2_5, i3_5])
                T.writes(compute_2[i0_5, i1_5, i2_5, i3_5])
                compute_2[i0_5, i1_5, i2_5, i3_5] = T.max(T.min(T_add_1[i0_5, i1_5, i2_5, i3_5], 255), 0)
        for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
                T.reads(compute_2[ax0, ax1, ax2, ax3], p8[0])
                T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
                T_subtract_1[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] - p8[0]
        for i0_7, i1_7, i2_7, i3_7 in T.grid(16, 56, 56, 256):
            with T.block("compute_2"):
                i0_8, i1_8, i2_8, i3_8 = T.axis.remap("SSSS", [i0_7, i1_7, i2_7, i3_7])
                T.reads(T_subtract_1[i0_8, i1_8, i2_8, i3_8])
                T.writes(compute_3[i0_8, i1_8, i2_8, i3_8])
                compute_3[i0_8, i1_8, i2_8, i3_8] = T.q_multiply_shift(T_subtract_1[i0_8, i1_8, i2_8, i3_8], 1457846997, 31, 0, dtype="int32")
        for i0_9, i1_9, i2_9, i3_9 in T.grid(16, 56, 56, 256):
            with T.block("compute_3"):
                i0_10, i1_10, i2_10, i3_10 = T.axis.remap("SSSS", [i0_9, i1_9, i2_9, i3_9])
                T.reads(p9[i0_10, i1_10, i2_10, i3_10])
                T.writes(compute_4[i0_10, i1_10, i2_10, i3_10])
                compute_4[i0_10, i1_10, i2_10, i3_10] = T.q_multiply_shift(p9[i0_10, i1_10, i2_10, i3_10], 2101000910, 31, 0, dtype="int32")
        for i0_11, i1_11, i2_11, i3_11 in T.grid(16, 56, 56, 256):
            with T.block("T_add_2"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_11, i1_11, i2_11, i3_11])
                T.reads(compute_3[ax0, ax1, ax2, ax3], compute_4[ax0, ax1, ax2, ax3])
                T.writes(T_add_2[ax0, ax1, ax2, ax3])
                T_add_2[ax0, ax1, ax2, ax3] = compute_3[ax0, ax1, ax2, ax3] + compute_4[ax0, ax1, ax2, ax3]
        for i0_12, i1_12, i2_12, i3_12 in T.grid(16, 56, 56, 256):
            with T.block("compute_4"):
                i0_13, i1_13, i2_13, i3_13 = T.axis.remap("SSSS", [i0_12, i1_12, i2_12, i3_12])
                T.reads(T_add_2[i0_13, i1_13, i2_13, i3_13])
                T.writes(compute[i0_13, i1_13, i2_13, i3_13])
                compute[i0_13, i1_13, i2_13, i3_13] = T.max(T.min(T_add_2[i0_13, i1_13, i2_13, i3_13], 255), 0)
@tvm.script.ir_module
class Conv2dInt8_with_predicate_scheduled:
@T.prim_func
def main(p0: T.Buffer[(16, 56, 56, 64), "int8"], p1: T.Buffer[(256, 1, 1, 64), "int8"], p2: T.Buffer[(1, 1, 1, 256), "int32"], p3: T.Buffer[(1, 1, 1, 256), "int32"], p4: T.Buffer[256, "int32"], p5: T.Buffer[256, "int32"], p6: T.Buffer[256, "int32"], p7: T.Buffer[(), "int32"], p8: T.Buffer[1, "int32"], p9: T.Buffer[(16, 56, 56, 256), "int32"], compute: T.Buffer[(16, 56, 56, 256), "int32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":1024})
conv2d_nhwc_reindex_shared = T.alloc_buffer([50176, 256], dtype="int32", scope="shared")
conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([50176, 256], dtype="int32", scope="wmma.accumulator")
pad_temp_reindex_shared = T.alloc_buffer([50176, 64], dtype="int8", scope="shared")
p1_reindex_shared = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="shared")
pad_temp_reindex_shared_wmma_matrix_a |
= T.alloc_buffer([50176, 64], dtype="int8", scope="wmma.matrix_a")
p1_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="wmma.matrix_b")
for ax2_0_0_ax3_0_0_fused in T.thread_binding(32, thread="blockIdx.y"):
for ax2_0_1_ax3_0_1_fused in T.thread_binding(196, thread="blockIdx.x"):
for ax2_0_2_ax3_0_2_fused in T.thread_binding(4, thread="threadIdx.y"):
for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2):
for ax0_ax1_fused in T.serial(1024):
with T.block("pad_temp_reindex_shared"):
v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused
v1 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_fused % 32)
T.reads(p0[v0
T.writes(pad_temp_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 16]], "meta_schedule.cooperative_fetch":4})
pad_temp_reindex_shared[v0, v1] = p0[v0
for ax0_ax1_ax2_ax3_fused in T.serial(2048):
with T.block("p1_reindex_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + ax0_ax1_ax2_ax3_fused
v3 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(p1[v2, v0, v1, v3])
T.writes(p1_reindex_shared[v0, v1, v2, v3])
T.block_attr({"buffer_dim_align":[[0, 2, 32, 16]], "meta_schedule.cooperative_fetch":3})
p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3]
for ax0_ |
1, ax1_1, ax4_0_1 in T.grid(1, 1, 2):
for ax0_0_1, ax1_0_1 in T.grid(1, 1):
with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused
v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1)
T.reads(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_a"})
for ax0_1_1, ax1_1_1 in T.grid(16, 16):
with T.block("pad_temp_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1])
T.reads(pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 2, 1):
with T.block("p1_reindex_shared_wmma.matrix_b_o"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax2_0)
v3_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1)
T.re |
ads(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_b_trans"})
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("p1_reindex_shared_wmma.matrix_b"):
v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads(p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]
for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 1, 1, 2):
with T.block("conv2d_nhwc_o"):
v0 = T.axis.reduce(1, 0)
v1 = T.axis.reduce(1, 0)
v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused
v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax3_0_3 * 2 + ax3_0_4)
v4_o = T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 + ax4_0_2)
T.reads(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared_wmma_accum |
ulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_s8s8s32_trans", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_s32", "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1})
with T.init():
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_init"):
v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads()
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init])
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = 0
for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16):
with T.block("conv2d_nhwc"):
v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_ |
i, v3_o * 16 + v3_i] + T.cast(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], "int32") * T.cast(p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i], "int32")
for ax0_0, ax1_0 in T.grid(1, 2):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused
v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax1_0)
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_s32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1_0, ax1_1, ax1_2, ax1_3 in T.grid(32, 1, 4, 32, 2):
with T.block("conv2d_nhwc_reindex_shared"):
T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64)
v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused
v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + (ax1_0 * 256 + |
ax1_1 * 64 + ax1_2 * 2 + ax1_3))
T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], p8[0], p9[v0
T.writes(compute[v0
compute[v0
def verify(anchor_mod, anchor_trace_fun, target_mod, target, ref):
    """Replay an anchor trace onto a target module and check the result.

    Records the trace produced by running ``anchor_trace_fun`` on a schedule of
    ``anchor_mod``, applies that trace to ``target_mod`` via meta-schedule's
    ``schedule_using_anchor_trace`` for the given ``target``, and asserts the
    scheduled module is structurally equal to ``ref``.
    """
    # Capture the trace by scheduling the anchor module.
    source_sch = Schedule(anchor_mod)
    anchor_trace_fun(source_sch)
    # Replay it on the target module and compare against the expected IR.
    dest_sch = Schedule(target_mod)
    ms.trace_apply.schedule_using_anchor_trace(dest_sch, source_sch.trace, Target(target))
    tvm.ir.assert_structural_equal(ref, dest_sch.mod)
def test_dense_add_cpu():
    """Anchor trace tuned on Dense applies correctly to Dense+Add on LLVM CPU.

    Fix applied: rejoined two lines split mid-token by chunking artifacts
    (``an|n_val=64`` and ``apply_an|chor_trace``) and restored indentation.
    """

    def apply_anchor_trace(sch: Schedule) -> None:
        # Tile the matmul (SSRSRS), cache the write, annotate parallel /
        # vectorize / unroll hints, then run the postproc steps exactly as a
        # recorded meta-schedule trace would.
        b0 = sch.get_block(name="T_matmul_NT", func_name="main")
        b1 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b0, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS")
        l2, l3, l4 = sch.get_loops(block=b0)
        v5, v6, v7, v8 = sch.sample_perfect_tile(
            loop=l2, n=4, max_innermost_factor=64, decision=[2, 8, 4, 2]
        )
        l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8], preserve_unit_iters=True)
        v13, v14, v15, v16 = sch.sample_perfect_tile(
            loop=l3, n=4, max_innermost_factor=64, decision=[2, 1, 1, 64]
        )
        l17, l18, l19, l20 = sch.split(
            loop=l3, factors=[v13, v14, v15, v16], preserve_unit_iters=True
        )
        v21, v22 = sch.sample_perfect_tile(loop=l4, n=2, max_innermost_factor=64, decision=[128, 1])
        l23, l24 = sch.split(loop=l4, factors=[v21, v22], preserve_unit_iters=True)
        sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
        b25 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
        sch.reverse_compute_at(block=b25, loop=l17, preserve_unit_loops=True, index=-1)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.parallel", ann_val=160)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.vectorize", ann_val=64)
        v26 = sch.sample_categorical(
            candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=0
        )
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.unroll_explicit", ann_val=v26)
        sch.enter_postproc()
        b27 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b27, ann_key="meta_schedule.parallel")
        sch.unannotate(block_or_loop=b27, ann_key="meta_schedule.vectorize")
        sch.unannotate(block_or_loop=b27, ann_key="meta_schedule.unroll_explicit")
        b28, b29 = sch.get_child_blocks(b27)
        l30, l31, l32, l33, l34, l35, l36, l37, l38, l39 = sch.get_loops(block=b28)
        l40 = sch.fuse(l30, l31, preserve_unit_iters=True)
        sch.parallel(loop=l40)
        l41 = sch.fuse(l39, preserve_unit_iters=True)
        sch.vectorize(loop=l41)
        l42, l43, l44 = sch.get_loops(block=b29)
        l45 = sch.fuse(l42, preserve_unit_iters=True)
        sch.parallel(loop=l45)
        l46 = sch.fuse(l44, preserve_unit_iters=True)
        sch.vectorize(loop=l46)
        b47 = sch.get_block(name="T_matmul_NT", func_name="main")
        l48, l49, l50, l51, l52, l53, l54, l55, l56 = sch.get_loops(block=b47)
        b57 = sch.decompose_reduction(block=b47, loop=l51)
        b58 = sch.get_block(name="T_matmul_NT_update", func_name="main")
        b59 = sch.cache_read(block=b58, read_buffer_index=2, storage_scope="global")
        # Pack the weight layout (i0, i1) -> (i0 // 64, i1, i0 % 64) for the
        # layout-rewrite preprocessing step.
        sch.transform_layout(
            block=b58,
            buffer=("read", 2),
            index_map=tvm.tir.IndexMap.from_func(
                lambda i0, i1: (
                    floordiv(i0, 64),
                    i1,
                    floormod(i0, 64),
                ),
                inverse_index_map=lambda i0, i1, i2: (
                    ((i0 * 64) + i2),
                    i1,
                ),
            ),
            pad_value=None,
        )
        sch.annotate(block_or_loop=b59, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=1)

    verify(Dense, apply_anchor_trace, DenseAdd, "llvm", DenseAdd_scheduled_cpu)
def test_dense_add_cpu_no_write_cache():
    """Anchor trace without a write cache applies to Dense+Add on LLVM CPU.

    Fix applied: rejoined a line split mid-token by a chunking artifact
    (``preserve_unit_iter|s=True``) and restored indentation.
    """

    def apply_trace(sch):
        # Same tiling recipe as the write-cache variant, but no cache_write /
        # reverse_compute_at stage; postproc fuses and binds directly.
        b0 = sch.get_block(name="T_matmul_NT", func_name="main")
        b1 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b0, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS")
        l2, l3, l4 = sch.get_loops(block=b0)
        v5, v6, v7, v8 = sch.sample_perfect_tile(
            loop=l2, n=4, max_innermost_factor=64, decision=[4, 4, 4, 2]
        )
        l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8], preserve_unit_iters=True)
        v13, v14, v15, v16 = sch.sample_perfect_tile(
            loop=l3, n=4, max_innermost_factor=64, decision=[1, 1, 4, 32]
        )
        l17, l18, l19, l20 = sch.split(
            loop=l3, factors=[v13, v14, v15, v16], preserve_unit_iters=True
        )
        v21, v22 = sch.sample_perfect_tile(loop=l4, n=2, max_innermost_factor=64, decision=[8, 16])
        l23, l24 = sch.split(loop=l4, factors=[v21, v22], preserve_unit_iters=True)
        sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.parallel", ann_val=160)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.vectorize", ann_val=64)
        v25 = sch.sample_categorical(
            candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=1
        )
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.unroll_explicit", ann_val=v25)
        sch.enter_postproc()
        b26 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b26, ann_key="meta_schedule.parallel")
        sch.unannotate(block_or_loop=b26, ann_key="meta_schedule.vectorize")
        sch.unannotate(block_or_loop=b26, ann_key="meta_schedule.unroll_explicit")
        (b27,) = sch.get_child_blocks(b26)
        l28, l29, l30, l31, l32, l33, l34, l35, l36, l37 = sch.get_loops(block=b27)
        l38 = sch.fuse(l28, l29, l30, l31, preserve_unit_iters=True)
        sch.parallel(loop=l38)
        l39 = sch.fuse(l37, preserve_unit_iters=True)
        sch.vectorize(loop=l39)
        sch.annotate(block_or_loop=l38, ann_key="pragma_auto_unroll_max_step", ann_val=16)
        sch.annotate(block_or_loop=l38, ann_key="pragma_unroll_explicit", ann_val=1)
        b40 = sch.get_block(name="T_matmul_NT", func_name="main")
        l41, l42, l43, l44, l45, l46, l47 = sch.get_loops(block=b40)
        b48 = sch.decompose_reduction(block=b40, loop=l42)
        b49 = sch.get_block(name="T_matmul_NT_update", func_name="main")
        b50 = sch.cache_read(block=b49, read_buffer_index=2, storage_scope="global")
        # Pack the weight layout into (i1 // 16, i0 // 32, i1 % 16, i0 % 32)
        # tiles for the layout-rewrite preprocessing step.
        sch.transform_layout(
            block=b49,
            buffer=("read", 2),
            index_map=tvm.tir.IndexMap.from_func(
                lambda i0, i1: (
                    floordiv(i1, 16),
                    floordiv(i0, 32),
                    floormod(i1, 16),
                    floormod(i0, 32),
                ),
                inverse_index_map=lambda i0, i1, i2, i3: (
                    ((i1 * 32) + i3),
                    ((i0 * 16) + i2),
                ),
            ),
            pad_value=None,
        )
        sch.annotate(block_or_loop=b50, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=1)

    verify(Dense, apply_trace, DenseAdd, "llvm", DenseAdd_cpu_no_write_cache)
def test_dense_add_gpu():
    """Anchor trace tuned on Dense applies correctly to Dense+Add on CUDA.

    Fix applied: rejoined three lines split mid-token by chunking artifacts
    (``sample_perfec|t_tile``, ``preserve_unit_loops|=True`` and
    ``pragma_auto_unr|oll_max_step``) and restored indentation.
    """

    def apply_anchor_trace(sch: Schedule) -> None:
        # GPU SSSRRSRS tiling: bind fused outer loops to blockIdx / vthread /
        # threadIdx, cache the output locally, cooperatively fetch both inputs
        # into shared memory, then run the postproc vectorize/bind steps.
        b0 = sch.get_block(name="T_matmul_NT", func_name="main")
        b1 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b0, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        l2, l3, l4 = sch.get_loops(block=b0)
        v5, v6, v7, v8, v9 = sch.sample_perfect_tile(
            loop=l2, n=5, max_innermost_factor=64, decision=[8, 1, 16, 1, 1]
        )
        l10, l11, l12, l13, l14 = sch.split(
            loop=l2, factors=[v5, v6, v7, v8, v9], preserve_unit_iters=True
        )
        v15, v16, v17, v18, v19 = sch.sample_perfect_tile(
            loop=l3, n=5, max_innermost_factor=64, decision=[4, 1, 8, 4, 1]
        )
        l20, l21, l22, l23, l24 = sch.split(
            loop=l3, factors=[v15, v16, v17, v18, v19], preserve_unit_iters=True
        )
        v25, v26, v27 = sch.sample_perfect_tile(
            loop=l4, n=3, max_innermost_factor=64, decision=[32, 1, 4]
        )
        l28, l29, l30 = sch.split(loop=l4, factors=[v25, v26, v27], preserve_unit_iters=True)
        sch.reorder(l10, l20, l11, l21, l12, l22, l28, l29, l13, l23, l30, l14, l24)
        l31 = sch.fuse(l10, l20, preserve_unit_iters=True)
        sch.bind(loop=l31, thread_axis="blockIdx.x")
        l32 = sch.fuse(l11, l21, preserve_unit_iters=True)
        sch.bind(loop=l32, thread_axis="vthread.x")
        l33 = sch.fuse(l12, l22, preserve_unit_iters=True)
        sch.bind(loop=l33, thread_axis="threadIdx.x")
        sch.annotate(
            block_or_loop=b0, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=16
        )
        sch.annotate(
            block_or_loop=b0, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=256
        )
        b34 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="local")
        sch.reverse_compute_at(block=b34, loop=l33, preserve_unit_loops=True, index=-1)
        b35 = sch.cache_read(
            block=b0, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b0]
        )
        sch.compute_at(block=b35, loop=l28, preserve_unit_loops=True, index=-1)
        l36, l37, l38, l39, l40, l41 = sch.get_loops(block=b35)
        l42 = sch.fuse(l40, l41, preserve_unit_iters=True)
        v43 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=1
        )
        sch.annotate(block_or_loop=b35, ann_key="meta_schedule.cooperative_fetch", ann_val=v43)
        b44 = sch.cache_read(
            block=b0, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b0]
        )
        sch.compute_at(block=b44, loop=l28, preserve_unit_loops=True, index=-1)
        l45, l46, l47, l48, l49, l50 = sch.get_loops(block=b44)
        l51 = sch.fuse(l49, l50, preserve_unit_iters=True)
        v52 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=3
        )
        sch.annotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch", ann_val=v52)
        v53 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=2,
        )
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.unroll_explicit", ann_val=v53)
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b35, ann_key="meta_schedule.cooperative_fetch")
        l54, l55, l56, l57, l58 = sch.get_loops(block=b35)
        l59, l60, l61 = sch.split(loop=l58, factors=[None, 128, 2], preserve_unit_iters=True)
        sch.vectorize(loop=l61)
        sch.bind(loop=l60, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch")
        l62, l63, l64, l65, l66 = sch.get_loops(block=b44)
        l67, l68, l69 = sch.split(loop=l66, factors=[None, 128, 4], preserve_unit_iters=True)
        sch.vectorize(loop=l69)
        sch.bind(loop=l68, thread_axis="threadIdx.x")
        b70 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b70, ann_key="meta_schedule.unroll_explicit")
        b71, b72, b73, b74 = sch.get_child_blocks(b70)
        l75, l76, l77, l78, l79, l80, l81 = sch.get_loops(block=b71)
        sch.annotate(block_or_loop=l75, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l75, ann_key="pragma_unroll_explicit", ann_val=1)
        l82, l83, l84, l85, l86, l87, l88 = sch.get_loops(block=b72)
        sch.annotate(block_or_loop=l82, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l82, ann_key="pragma_unroll_explicit", ann_val=1)
        l89, l90, l91, l92, l93, l94, l95, l96, l97, l98 = sch.get_loops(block=b73)
        sch.annotate(block_or_loop=l89, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l89, ann_key="pragma_unroll_explicit", ann_val=1)
        l99, l100, l101, l102, l103 = sch.get_loops(block=b74)
        sch.annotate(block_or_loop=l99, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l99, ann_key="pragma_unroll_explicit", ann_val=1)
        b104 = sch.get_block(name="T_matmul_NT", func_name="main")
        l105, l106, l107, l108, l109, l110, l111, l112, l113, l114 = sch.get_loops(block=b104)
        b115 = sch.decompose_reduction(block=b104, loop=l108)

    verify(Dense, apply_anchor_trace, DenseAdd, "cuda", DenseAdd_scheduled_gpu)
def test_conv2d_int8_tensorcore():
    """Replay a recorded MetaSchedule trace that maps an int8 NHWC conv2d
    (with its quantization epilogue) onto CUDA tensor-core ``wmma`` intrinsics.

    NOTE(review): ``verify`` presumably applies ``apply_trace`` to
    ``Conv2dInt8`` and compares against ``Conv2dInt8_tensorcore_scheduled``
    — confirm against the ``verify`` helper defined elsewhere in this file.
    """

    def apply_trace(sch):
        """Apply the pinned schedule trace; every sampling step carries an
        explicit ``decision=`` so the replay is fully deterministic."""
        # Look up the conv block, its quantization/epilogue chain, and root.
        b0 = sch.get_block(name="pad_temp", func_name="main")
        b1 = sch.get_block(name="conv2d_nhwc", func_name="main")
        b2 = sch.get_block(name="T_subtract", func_name="main")
        b3 = sch.get_block(name="T_add", func_name="main")
        b4 = sch.get_block(name="T_cast", func_name="main")
        b5 = sch.get_block(name="T_multiply", func_name="main")
        b6 = sch.get_block(name="T_add_1", func_name="main")
        b7 = sch.get_block(name="T_right_shift", func_name="main")
        b8 = sch.get_block(name="T_cast_1", func_name="main")
        b9 = sch.get_block(name="T_add_2", func_name="main")
        b10 = sch.get_block(name="compute", func_name="main")
        b11 = sch.get_block(name="T_cast_2", func_name="main")
        b12 = sch.get_block(name="T_cast_3", func_name="main")
        b13 = sch.get_block(name="T_subtract_1", func_name="main")
        b14 = sch.get_block(name="compute_1", func_name="main")
        b15 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        # Reindex + layout transforms: collapse (nn, yy, xx) into one row axis
        # so the 1x1 conv reads/writes matmul-shaped 2-D buffers.
        b16 = sch.reindex(block=b1, buffer=("write", 0))
        b17 = sch.reindex(block=b1, buffer=("read", 0))
        b18 = sch.reindex(block=b1, buffer=("read", 1))
        sch.transform_layout(
            block=b1,
            buffer=("read", 0),
            index_map=lambda nn, yy, xx, rc: (
                (((nn * 3136) + (yy * 56)) + xx),
                rc,
            ),
            pad_value=None,
        )
        sch.transform_layout(
            block=b1,
            buffer=("read", 1),
            index_map=lambda ff, ry, rx, rc: (
                ry,
                rx,
                ff,
                rc,
            ),
            pad_value=None,
        )
        sch.transform_layout(
            block=b1,
            buffer=("write", 0),
            index_map=lambda nn, yy, xx, ff: (
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
            ),
            pad_value=None,
        )
        sch.transform_block_layout(
            block=b16,
            index_map=lambda nn, yy, xx, ff: (
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
            ),
        )
        sch.transform_block_layout(
            block=b17,
            index_map=lambda nn, yy, xx, rc: (
                (((nn * 3136) + (yy * 56)) + xx),
                rc,
            ),
        )
        sch.transform_block_layout(
            block=b18,
            index_map=lambda ff, ry, rx, rc: (
                ry,
                rx,
                ff,
                rc,
            ),
        )
        sch.transform_block_layout(
            block=b1,
            index_map=lambda nn, yy, xx, ff, ry, rx, rc: (
                ry,
                rx,
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
                rc,
            ),
        )
        # Split the innermost M/N/K loops by 16 and blockize the 16x16x16
        # inner tile so it can be tensorized with a wmma mma_sync later.
        l19, l20, l21, l22, l23 = sch.get_loops(block=b1)
        l24, l25 = sch.split(loop=l23, factors=[None, 16], preserve_unit_iters=True)
        l26, l27 = sch.split(loop=l22, factors=[None, 16], preserve_unit_iters=True)
        l28, l29 = sch.split(loop=l21, factors=[None, 16], preserve_unit_iters=True)
        l30, l31, l32, l33, l34, l35, l36, l37 = sch.get_loops(block=b1)
        sch.reorder(l34, l36, l29, l27, l25)
        b38 = sch.blockize(loop=l29)
        sch.annotate(
            block_or_loop=b38,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_sync_16x16x16_s8s8s32_trans",
        )
        sch.annotate(
            block_or_loop=b38,
            ann_key="meta_schedule.auto_tensorize_init",
            ann_val="wmma_fill_16x16x16_s32",
        )
        sch.annotate(block_or_loop=b38, ann_key="warp_execution", ann_val=1)
        # Multi-level tiling of the outer loops; tile sizes are pinned by the
        # recorded `decision` lists.
        l39, l40, l41, l42, l43 = sch.get_loops(block=b38)
        v44, v45, v46 = sch.sample_perfect_tile(
            loop=l39, n=3, max_innermost_factor=4, decision=[1, 1, 1]
        )
        l47, l48, l49 = sch.split(loop=l39, factors=[v44, v45, v46], preserve_unit_iters=True)
        v50, v51, v52 = sch.sample_perfect_tile(
            loop=l40, n=3, max_innermost_factor=4, decision=[1, 1, 1]
        )
        l53, l54, l55 = sch.split(loop=l40, factors=[v50, v51, v52], preserve_unit_iters=True)
        v56, v57, v58, v59, v60 = sch.sample_perfect_tile(
            loop=l41, n=5, max_innermost_factor=4, decision=[392, 1, 8, 1, 1]
        )
        l61, l62, l63, l64, l65 = sch.split(
            loop=l41, factors=[v56, v57, v58, v59, v60], preserve_unit_iters=True
        )
        v66, v67, v68, v69, v70 = sch.sample_perfect_tile(
            loop=l42, n=5, max_innermost_factor=4, decision=[8, 1, 2, 1, 1]
        )
        l71, l72, l73, l74, l75 = sch.split(
            loop=l42, factors=[v66, v67, v68, v69, v70], preserve_unit_iters=True
        )
        v76, v77, v78 = sch.sample_perfect_tile(
            loop=l43, n=3, max_innermost_factor=4, decision=[2, 1, 2]
        )
        l79, l80, l81 = sch.split(loop=l43, factors=[v76, v77, v78], preserve_unit_iters=True)
        sch.reorder(
            l61,
            l71,
            l62,
            l72,
            l63,
            l73,
            l47,
            l53,
            l79,
            l48,
            l54,
            l80,
            l64,
            l74,
            l49,
            l55,
            l81,
            l65,
            l75,
        )
        # Bind fused spatial tiles to the GPU thread hierarchy.
        l82 = sch.fuse(l61, l71, preserve_unit_iters=True)
        sch.bind(loop=l82, thread_axis="blockIdx.x")
        l83 = sch.fuse(l62, l72, preserve_unit_iters=True)
        sch.bind(loop=l83, thread_axis="vthread.x")
        l84 = sch.fuse(l63, l73, preserve_unit_iters=True)
        sch.bind(loop=l84, thread_axis="threadIdx.x")
        sch.annotate(
            block_or_loop=b38, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=32
        )
        sch.annotate(
            block_or_loop=b38, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=1024
        )
        # Write-back path: accumulator fragments -> shared memory -> global.
        b85 = sch.cache_write(block=b38, write_buffer_index=0, storage_scope="shared")
        sch.reverse_compute_at(block=b85, loop=l83, preserve_unit_loops=True, index=-1)
        b86 = sch.cache_write(block=b38, write_buffer_index=0, storage_scope="wmma.accumulator")
        sch.reverse_compute_at(block=b86, loop=l84, preserve_unit_loops=True, index=-1)
        v87 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=0,
        )
        sch.annotate(block_or_loop=b85, ann_key="meta_schedule.cooperative_fetch", ann_val=v87)
        sch.reverse_compute_inline(block=b16)
        # Re-tile the accumulator cache block to a 16x16 unit for wmma store.
        l88, l89, l90, l91, l92 = sch.get_loops(block=b86)
        l93, l94 = sch.split(loop=l92, factors=[None, 16], preserve_unit_iters=True)
        l95, l96 = sch.split(loop=l91, factors=[None, 16], preserve_unit_iters=True)
        l97, l98, l99, l100, l101, l102, l103 = sch.get_loops(block=b86)
        sch.reorder(l102, l96, l94)
        b104 = sch.blockize(loop=l96)
        sch.annotate(
            block_or_loop=b104,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_store_16x16x16_s32_shared",
        )
        # Stage both conv inputs through shared memory with cooperative fetch.
        b105 = sch.cache_read(
            block=b38, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b38]
        )
        sch.compute_at(block=b105, loop=l79, preserve_unit_loops=True, index=-1)
        l106, l107, l108, l109, l110, l111, l112, l113 = sch.get_loops(block=b105)
        l114 = sch.fuse(l112, l113, preserve_unit_iters=True)
        v115 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=5,
        )
        sch.annotate(block_or_loop=b105, ann_key="meta_schedule.cooperative_fetch", ann_val=v115)
        b116 = sch.cache_read(
            block=b38, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b38]
        )
        sch.compute_at(block=b116, loop=l79, preserve_unit_loops=True, index=-1)
        l117, l118, l119, l120, l121, l122, l123, l124, l125, l126 = sch.get_loops(block=b116)
        l127 = sch.fuse(l123, l124, l125, l126, preserve_unit_iters=True)
        v128 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=4,
        )
        sch.annotate(block_or_loop=b116, ann_key="meta_schedule.cooperative_fetch", ann_val=v128)
        # Load shared-memory tiles into wmma matrix_a / matrix_b fragments and
        # blockize each 16x16 load for later tensorization.
        b129 = sch.cache_read(block=b38, read_buffer_index=0, storage_scope="wmma.matrix_a")
        sch.compute_at(block=b129, loop=l80, preserve_unit_loops=True, index=-1)
        l130, l131, l132, l133, l134, l135, l136, l137, l138, l139, l140 = sch.get_loops(block=b129)
        l141, l142 = sch.split(loop=l140, factors=[None, 16], preserve_unit_iters=True)
        l143, l144 = sch.split(loop=l139, factors=[None, 16], preserve_unit_iters=True)
        (
            l145,
            l146,
            l147,
            l148,
            l149,
            l150,
            l151,
            l152,
            l153,
            l154,
            l155,
            l156,
            l157,
        ) = sch.get_loops(block=b129)
        sch.reorder(l156, l144, l142)
        b158 = sch.blockize(loop=l144)
        sch.annotate(
            block_or_loop=b158,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_load_16x16x16_s8_a",
        )
        b159 = sch.cache_read(block=b38, read_buffer_index=1, storage_scope="wmma.matrix_b")
        sch.compute_at(block=b159, loop=l80, preserve_unit_loops=True, index=-1)
        (
            l160,
            l161,
            l162,
            l163,
            l164,
            l165,
            l166,
            l167,
            l168,
            l169,
            l170,
            l171,
            l172,
        ) = sch.get_loops(block=b159)
        l173, l174 = sch.split(loop=l172, factors=[None, 16], preserve_unit_iters=True)
        l175, l176 = sch.split(loop=l171, factors=[None, 16], preserve_unit_iters=True)
        (
            l177,
            l178,
            l179,
            l180,
            l181,
            l182,
            l183,
            l184,
            l185,
            l186,
            l187,
            l188,
            l189,
            l190,
            l191,
        ) = sch.get_loops(block=b159)
        sch.reorder(l190, l176, l174)
        b192 = sch.blockize(loop=l176)
        sch.annotate(
            block_or_loop=b192,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_load_16x16x16_s8_b_trans",
        )
        sch.compute_inline(block=b17)
        sch.compute_inline(block=b18)
        # Pad shared-memory rows to avoid bank conflicts.
        sch.storage_align(block=b105, buffer_index=0, axis=-2, factor=32, offset=16)
        sch.storage_align(block=b116, buffer_index=0, axis=-2, factor=32, offset=16)
        # Inline the whole elementwise epilogue chain (and the pad) into the conv.
        sch.reverse_compute_inline(block=b14)
        sch.reverse_compute_inline(block=b13)
        sch.reverse_compute_inline(block=b12)
        sch.reverse_compute_inline(block=b11)
        sch.reverse_compute_inline(block=b10)
        sch.reverse_compute_inline(block=b9)
        sch.reverse_compute_inline(block=b8)
        sch.reverse_compute_inline(block=b7)
        sch.reverse_compute_inline(block=b6)
        sch.reverse_compute_inline(block=b5)
        sch.reverse_compute_inline(block=b4)
        sch.reverse_compute_inline(block=b3)
        sch.reverse_compute_inline(block=b2)
        sch.compute_inline(block=b0)
        v193 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=3,
        )
        sch.annotate(block_or_loop=b15, ann_key="meta_schedule.unroll_explicit", ann_val=v193)
        # --- Postprocessing: concretize cooperative fetch, unroll pragmas, tensorize. ---
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b85, ann_key="meta_schedule.cooperative_fetch")
        l194, l195, l196, l197 = sch.get_loops(block=b85)
        l198, l199 = sch.split(loop=l197, factors=[None, 16], preserve_unit_iters=True)
        sch.bind(loop=l199, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b105, ann_key="meta_schedule.cooperative_fetch")
        l200, l201, l202, l203, l204, l205, l206 = sch.get_loops(block=b105)
        l207, l208, l209 = sch.split(loop=l206, factors=[None, 16, 16], preserve_unit_iters=True)
        sch.vectorize(loop=l209)
        sch.bind(loop=l208, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b116, ann_key="meta_schedule.cooperative_fetch")
        l210, l211, l212, l213, l214, l215, l216 = sch.get_loops(block=b116)
        l217, l218, l219 = sch.split(loop=l216, factors=[None, 16, 8], preserve_unit_iters=True)
        sch.vectorize(loop=l219)
        sch.bind(loop=l218, thread_axis="threadIdx.x")
        # Apply the sampled unroll setting as concrete pragmas on every child block.
        b220 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b220, ann_key="meta_schedule.unroll_explicit")
        b221, b222, b223, b224, b225, b226, b227 = sch.get_child_blocks(b220)
        l228, l229, l230, l231, l232, l233, l234, l235, l236 = sch.get_loops(block=b221)
        sch.annotate(block_or_loop=l228, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l228, ann_key="pragma_unroll_explicit", ann_val=1)
        l237, l238, l239, l240, l241, l242, l243, l244, l245 = sch.get_loops(block=b222)
        sch.annotate(block_or_loop=l237, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l237, ann_key="pragma_unroll_explicit", ann_val=1)
        l246, l247, l248, l249, l250, l251, l252, l253, l254, l255, l256 = sch.get_loops(block=b223)
        sch.annotate(block_or_loop=l246, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l246, ann_key="pragma_unroll_explicit", ann_val=1)
        (
            l257,
            l258,
            l259,
            l260,
            l261,
            l262,
            l263,
            l264,
            l265,
            l266,
            l267,
            l268,
            l269,
        ) = sch.get_loops(block=b224)
        sch.annotate(block_or_loop=l257, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l257, ann_key="pragma_unroll_explicit", ann_val=1)
        (
            l270,
            l271,
            l272,
            l273,
            l274,
            l275,
            l276,
            l277,
            l278,
            l279,
            l280,
            l281,
            l282,
            l283,
            l284,
            l285,
        ) = sch.get_loops(block=b225)
        sch.annotate(block_or_loop=l270, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l270, ann_key="pragma_unroll_explicit", ann_val=1)
        l286, l287, l288, l289, l290 = sch.get_loops(block=b226)
        sch.annotate(block_or_loop=l286, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l286, ann_key="pragma_unroll_explicit", ann_val=1)
        l291, l292, l293, l294, l295 = sch.get_loops(block=b227)
        sch.annotate(block_or_loop=l291, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l291, ann_key="pragma_unroll_explicit", ann_val=1)
        # Split init from update, then tensorize each staged block with its
        # matching wmma intrinsic.
        b296 = sch.get_block(name="conv2d_nhwc_o", func_name="main")
        (
            l297,
            l298,
            l299,
            l300,
            l301,
            l302,
            l303,
            l304,
            l305,
            l306,
            l307,
            l308,
            l309,
            l310,
            l311,
            l312,
        ) = sch.get_loops(block=b296)
        b313 = sch.decompose_reduction(block=b296, loop=l302)
        sch.unannotate(block_or_loop=b313, ann_key="meta_schedule.auto_tensorize")
        sch.annotate(
            block_or_loop=b313,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_fill_16x16x16_s32",
        )
        sch.unannotate(block_or_loop=b296, ann_key="meta_schedule.auto_tensorize_init")
        sch.unannotate(block_or_loop=b313, ann_key="meta_schedule.auto_tensorize_init")
        b314 = sch.get_block(name="conv2d_nhwc_o_init", func_name="main")
        sch.unannotate(block_or_loop=b314, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b314, tensor_intrin="wmma_fill_16x16x16_s32")
        b315 = sch.get_block(name="pad_temp_reindex_shared_wmma.matrix_a_o", func_name="main")
        sch.unannotate(block_or_loop=b315, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b315, tensor_intrin="wmma_load_16x16x16_s8_a")
        b316 = sch.get_block(name="p1_reindex_shared_wmma.matrix_b_o", func_name="main")
        sch.unannotate(block_or_loop=b316, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b316, tensor_intrin="wmma_load_16x16x16_s8_b_trans")
        b317 = sch.get_block(name="conv2d_nhwc_o_update", func_name="main")
        sch.unannotate(block_or_loop=b317, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b317, tensor_intrin="wmma_sync_16x16x16_s8s8s32_trans")
        b318 = sch.get_block(name="conv2d_nhwc_reindex_shared_wmma.accumulator_o", func_name="main")
        sch.unannotate(block_or_loop=b318, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b318, tensor_intrin="wmma_store_16x16x16_s32_shared")

    verify(Conv2dInt8, apply_trace, Conv2dInt8_target, "cuda", Conv2dInt8_tensorcore_scheduled)
def test_conv2d_int8_vnni():
    """Replay a recorded MetaSchedule trace that schedules an int8 NCHWc
    conv2d for x86 VNNI (``dot_16x4_vnni`` / ``vpdpbusd``) on cascadelake.

    NOTE(review): ``verify`` presumably compares the traced schedule against
    the module produced by ``get_conv2d_vnni_mod(vnni_id)`` — confirm against
    the helper defined elsewhere in this file.
    """

    def apply_trace(sch):
        """Apply the pinned schedule trace (all sampling decisions fixed)."""
        # Look up the conv block, its quantization/epilogue chain, and root.
        b0 = sch.get_block(name="compile_engine_const", func_name="main")
        b1 = sch.get_block(name="conv2d_NCHWc_int8", func_name="main")
        b2 = sch.get_block(name="T_add", func_name="main")
        b3 = sch.get_block(name="T_cast", func_name="main")
        b4 = sch.get_block(name="T_multiply", func_name="main")
        b5 = sch.get_block(name="compile_engine_const_1", func_name="main")
        b6 = sch.get_block(name="T_add_1", func_name="main")
        b7 = sch.get_block(name="T_floor", func_name="main")
        b8 = sch.get_block(name="T_cast_1", func_name="main")
        b9 = sch.get_block(name="compute", func_name="main")
        b10 = sch.get_block(name="T_cast_2", func_name="main")
        b11 = sch.get_block(name="T_cast_3", func_name="main")
        b12 = sch.get_block(name="T_subtract", func_name="main")
        b13 = sch.get_block(name="T_multiply_1", func_name="main")
        b14 = sch.get_block(name="compile_engine_const_2", func_name="main")
        b15 = sch.get_block(name="T_add_2", func_name="main")
        b16 = sch.get_block(name="T_floor_1", func_name="main")
        b17 = sch.get_block(name="T_cast_4", func_name="main")
        b18 = sch.get_block(name="T_add_3", func_name="main")
        b19 = sch.get_block(name="compute_1", func_name="main")
        b20 = sch.get_block(name="T_cast_5", func_name="main")
        b21 = sch.get_block(name="root", func_name="main")
        # Inline the entire elementwise epilogue chain into the conv.
        sch.compute_inline(block=b20)
        sch.compute_inline(block=b19)
        sch.compute_inline(block=b18)
        sch.compute_inline(block=b17)
        sch.compute_inline(block=b16)
        sch.compute_inline(block=b15)
        sch.compute_inline(block=b14)
        sch.compute_inline(block=b13)
        sch.compute_inline(block=b12)
        sch.compute_inline(block=b11)
        sch.compute_inline(block=b10)
        sch.compute_inline(block=b9)
        sch.compute_inline(block=b8)
        sch.compute_inline(block=b7)
        sch.compute_inline(block=b6)
        sch.compute_inline(block=b5)
        sch.compute_inline(block=b4)
        sch.compute_inline(block=b3)
        sch.compute_inline(block=b2)
        sch.compute_inline(block=b0)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS")
        # Carve out the 16x4 inner tile and blockize it for the VNNI intrinsic.
        l22, l23, l24, l25, l26, l27, l28, l29, l30, l31 = sch.get_loops(block=b1)
        l32, l33 = sch.split(loop=l31, factors=[None, 4], preserve_unit_iters=True)
        l34, l35 = sch.split(loop=l26, factors=[None, 16], preserve_unit_iters=True)
        l36, l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47 = sch.get_loops(block=b1)
        sch.reorder(l42, l43, l44, l45, l46, l35, l33)
        b48 = sch.blockize(loop=l35)
        sch.annotate(
            block_or_loop=b48, ann_key="meta_schedule.auto_tensorize", ann_val="dot_16x4_vnni"
        )
        # Multi-level tiling of the outer loops; tile sizes pinned by `decision`.
        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58 = sch.get_loops(block=b48)
        v59, v60, v61, v62 = sch.sample_perfect_tile(
            loop=l49, n=4, max_innermost_factor=64, decision=[1, 1, 1, 1]
        )
        l63, l64, l65, l66 = sch.split(
            loop=l49, factors=[v59, v60, v61, v62], preserve_unit_iters=True
        )
        v67, v68, v69, v70 = sch.sample_perfect_tile(
            loop=l50, n=4, max_innermost_factor=64, decision=[4, 32, 1, 1]
        )
        l71, l72, l73, l74 = sch.split(
            loop=l50, factors=[v67, v68, v69, v70], preserve_unit_iters=True
        )
        v75, v76, v77, v78 = sch.sample_perfect_tile(
            loop=l51, n=4, max_innermost_factor=64, decision=[1, 7, 1, 1]
        )
        l79, l80, l81, l82 = sch.split(
            loop=l51, factors=[v75, v76, v77, v78], preserve_unit_iters=True
        )
        v83, v84, v85, v86 = sch.sample_perfect_tile(
            loop=l52, n=4, max_innermost_factor=64, decision=[1, 1, 1, 7]
        )
        l87, l88, l89, l90 = sch.split(
            loop=l52, factors=[v83, v84, v85, v86], preserve_unit_iters=True
        )
        v91, v92, v93, v94 = sch.sample_perfect_tile(
            loop=l53, n=4, max_innermost_factor=64, decision=[1, 1, 1, 1]
        )
        l95, l96, l97, l98 = sch.split(
            loop=l53, factors=[v91, v92, v93, v94], preserve_unit_iters=True
        )
        v99, v100 = sch.sample_perfect_tile(loop=l54, n=2, max_innermost_factor=64, decision=[1, 1])
        l101, l102 = sch.split(loop=l54, factors=[v99, v100], preserve_unit_iters=True)
        v103, v104 = sch.sample_perfect_tile(
            loop=l55, n=2, max_innermost_factor=64, decision=[1, 1]
        )
        l105, l106 = sch.split(loop=l55, factors=[v103, v104], preserve_unit_iters=True)
        v107, v108 = sch.sample_perfect_tile(
            loop=l56, n=2, max_innermost_factor=64, decision=[4, 8]
        )
        l109, l110 = sch.split(loop=l56, factors=[v107, v108], preserve_unit_iters=True)
        v111, v112 = sch.sample_perfect_tile(
            loop=l57, n=2, max_innermost_factor=64, decision=[4, 1]
        )
        l113, l114 = sch.split(loop=l57, factors=[v111, v112], preserve_unit_iters=True)
        v115, v116 = sch.sample_perfect_tile(
            loop=l58, n=2, max_innermost_factor=64, decision=[1, 1]
        )
        l117, l118 = sch.split(loop=l58, factors=[v115, v116], preserve_unit_iters=True)
        # Arrange tiles into the SSRSRS order declared above.
        sch.reorder(
            l63,
            l71,
            l79,
            l87,
            l95,
            l64,
            l72,
            l80,
            l88,
            l96,
            l101,
            l105,
            l109,
            l113,
            l117,
            l65,
            l73,
            l81,
            l89,
            l97,
            l102,
            l106,
            l110,
            l114,
            l118,
            l66,
            l74,
            l82,
            l90,
            l98,
        )
        (b119,) = sch.get_consumers(block=b48)
        sch.reverse_compute_at(block=b119, loop=l96, preserve_unit_loops=True, index=-1)
        # CPU-wide hints: parallelism, vectorization width, unrolling.
        sch.annotate(block_or_loop=b21, ann_key="meta_schedule.parallel", ann_val=96)
        sch.annotate(block_or_loop=b21, ann_key="meta_schedule.vectorize", ann_val=64)
        v120 = sch.sample_categorical(
            candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=2
        )
        sch.annotate(block_or_loop=b21, ann_key="meta_schedule.unroll_explicit", ann_val=v120)
        # --- Postprocessing: concretize the hints, then tensorize. ---
        sch.enter_postproc()
        b121 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b121, ann_key="meta_schedule.parallel")
        sch.unannotate(block_or_loop=b121, ann_key="meta_schedule.vectorize")
        sch.unannotate(block_or_loop=b121, ann_key="meta_schedule.unroll_explicit")
        b122, b123 = sch.get_child_blocks(b121)
        (
            l124,
            l125,
            l126,
            l127,
            l128,
            l129,
            l130,
            l131,
            l132,
            l133,
            l134,
            l135,
            l136,
            l137,
            l138,
            l139,
            l140,
            l141,
            l142,
            l143,
            l144,
            l145,
            l146,
            l147,
            l148,
            l149,
            l150,
            l151,
            l152,
            l153,
        ) = sch.get_loops(block=b122)
        l154 = sch.fuse(l124, l125, l126, l127, l128, l129, l130, preserve_unit_iters=True)
        sch.parallel(loop=l154)
        sch.annotate(block_or_loop=l154, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l154, ann_key="pragma_unroll_explicit", ann_val=1)
        l155, l156, l157, l158, l159, l160, l161, l162, l163 = sch.get_loops(block=b123)
        l164 = sch.fuse(l163, preserve_unit_iters=True)
        sch.vectorize(loop=l164)
        sch.annotate(block_or_loop=l155, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l155, ann_key="pragma_unroll_explicit", ann_val=1)
        # Split init from update; vectorize the init, tensorize the update.
        b165 = sch.get_block(name="conv2d_NCHWc_int8_o", func_name="main")
        (
            l166,
            l167,
            l168,
            l169,
            l170,
            l171,
            l172,
            l173,
            l174,
            l175,
            l176,
            l177,
            l178,
            l179,
            l180,
            l181,
            l182,
            l183,
            l184,
            l185,
            l186,
            l187,
            l188,
            l189,
        ) = sch.get_loops(block=b165)
        b190 = sch.decompose_reduction(block=b165, loop=l172)
        sch.unannotate(block_or_loop=b190, ann_key="meta_schedule.auto_tensorize")
        sch.annotate(block_or_loop=b190, ann_key="meta_schedule.auto_tensorize", ann_val="")
        b191 = sch.get_block(name="conv2d_NCHWc_int8_o_init", func_name="main")
        sch.unannotate(block_or_loop=b191, ann_key="meta_schedule.auto_tensorize")
        (b192,) = sch.get_child_blocks(b191)
        (l193,) = sch.get_loops(block=b192)
        sch.vectorize(loop=l193)
        b194 = sch.get_block(name="conv2d_NCHWc_int8_o_update", func_name="main")
        sch.unannotate(block_or_loop=b194, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b194, tensor_intrin="dot_16x4_vnni")

    # Resolve the LLVM intrinsic id used by the expected module.
    vnni_id = llvm_lookup_intrinsic_id("llvm.x86.avx512.vpdpbusd.512")
    verify(
        Conv2dInt8_NCHWc,
        apply_trace,
        Conv2dInt8_NCHWc_target,
        "llvm -mcpu=cascadelake",
        get_conv2d_vnni_mod(vnni_id),
    )
def test_winograd_gpu():
    """Replay a recorded MetaSchedule trace for a Winograd conv2d (+add+relu)
    on CUDA: data_pack / bgemm / inverse are each tiled and thread-bound.

    NOTE(review): ``verify`` presumably applies ``apply_trace`` to
    ``Conv2dWinogradAddRelu`` and compares against
    ``Conv2dWinogradAddResidualRelu_scheduled`` — confirm against the
    ``verify`` helper defined elsewhere in this file.
    """

    def apply_trace(sch):
        """Apply the pinned schedule trace (all sampling decisions fixed)."""
        # Look up the Winograd stages plus epilogue and root.
        b0 = sch.get_block(name="B", func_name="main")
        b1 = sch.get_block(name="data_pack", func_name="main")
        b2 = sch.get_block(name="bgemm", func_name="main")
        b3 = sch.get_block(name="A", func_name="main")
        b4 = sch.get_block(name="inverse", func_name="main")
        b5 = sch.get_block(name="conv2d_winograd", func_name="main")
        b6 = sch.get_block(name="T_add", func_name="main")
        b7 = sch.get_block(name="T_relu", func_name="main")
        b8 = sch.get_block(name="root", func_name="main")
        sch.compute_inline(block=b0)
        # --- data_pack: tile, unroll the small transform loops, thread-bind. ---
        (b9,) = sch.get_producers(block=b1)
        (b10,) = sch.get_producers(block=b9)
        l11, l12, l13, l14, l15, l16 = sch.get_loops(block=b1)
        v17, v18 = sch.sample_perfect_tile(
            loop=l13, n=2, max_innermost_factor=64, decision=[14, 14]
        )
        l19, l20 = sch.split(loop=l13, factors=[v17, v18], preserve_unit_iters=True)
        v21, v22 = sch.sample_perfect_tile(loop=l14, n=2, max_innermost_factor=64, decision=[8, 8])
        l23, l24 = sch.split(loop=l14, factors=[v21, v22], preserve_unit_iters=True)
        sch.unroll(loop=l11)
        sch.unroll(loop=l12)
        sch.unroll(loop=l15)
        sch.unroll(loop=l16)
        sch.reorder(l19, l23, l20, l24, l11, l12, l15, l16)
        sch.compute_at(block=b9, loop=l24, preserve_unit_loops=True, index=-1)
        sch.set_scope(block=b9, buffer_index=0, storage_scope="local")
        sch.compute_inline(block=b10)
        l25, l26, l27, l28, l29, l30, l31, l32 = sch.get_loops(block=b1)
        l33 = sch.fuse(l25, l26, l27, l28, preserve_unit_iters=True)
        v34 = sch.sample_categorical(
            candidates=[32, 64, 128, 256, 512, 1024],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=2,
        )
        l35, l36 = sch.split(loop=l33, factors=[None, v34], preserve_unit_iters=True)
        sch.bind(loop=l35, thread_axis="blockIdx.x")
        sch.bind(loop=l36, thread_axis="threadIdx.x")
        # --- inverse: same treatment as data_pack. ---
        sch.compute_inline(block=b3)
        l37, l38, l39, l40, l41, l42 = sch.get_loops(block=b4)
        v43, v44 = sch.sample_perfect_tile(loop=l39, n=2, max_innermost_factor=64, decision=[28, 7])
        l45, l46 = sch.split(loop=l39, factors=[v43, v44], preserve_unit_iters=True)
        v47, v48 = sch.sample_perfect_tile(loop=l40, n=2, max_innermost_factor=64, decision=[2, 32])
        l49, l50 = sch.split(loop=l40, factors=[v47, v48], preserve_unit_iters=True)
        sch.unroll(loop=l37)
        sch.unroll(loop=l38)
        sch.unroll(loop=l41)
        sch.unroll(loop=l42)
        sch.reorder(l45, l49, l46, l50, l37, l38, l41, l42)
        l51, l52, l53, l54, l55, l56, l57, l58 = sch.get_loops(block=b4)
        l59 = sch.fuse(l51, l52, l53, l54, preserve_unit_iters=True)
        v60 = sch.sample_categorical(
            candidates=[32, 64, 128, 256, 512, 1024],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=4,
        )
        l61, l62 = sch.split(loop=l59, factors=[None, v60], preserve_unit_iters=True)
        sch.bind(loop=l61, thread_axis="blockIdx.x")
        sch.bind(loop=l62, thread_axis="threadIdx.x")
        # --- bgemm: multi-level tiling with pinned decisions, thread binding,
        # local accumulator and shared-memory input staging. ---
        sch.annotate(block_or_loop=b2, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        l63, l64, l65, l66, l67 = sch.get_loops(block=b2)
        v68, v69, v70, v71, v72 = sch.sample_perfect_tile(
            loop=l63, n=5, max_innermost_factor=64, decision=[1, 2, 3, 1, 1]
        )
        l73, l74, l75, l76, l77 = sch.split(
            loop=l63, factors=[v68, v69, v70, v71, v72], preserve_unit_iters=True
        )
        v78, v79, v80, v81, v82 = sch.sample_perfect_tile(
            loop=l64, n=5, max_innermost_factor=64, decision=[6, 1, 1, 1, 1]
        )
        l83, l84, l85, l86, l87 = sch.split(
            loop=l64, factors=[v78, v79, v80, v81, v82], preserve_unit_iters=True
        )
        v88, v89, v90, v91, v92 = sch.sample_perfect_tile(
            loop=l65, n=5, max_innermost_factor=64, decision=[7, 2, 1, 14, 1]
        )
        l93, l94, l95, l96, l97 = sch.split(
            loop=l65, factors=[v88, v89, v90, v91, v92], preserve_unit_iters=True
        )
        v98, v99, v100, v101, v102 = sch.sample_perfect_tile(
            loop=l66, n=5, max_innermost_factor=64, decision=[4, 1, 16, 1, 1]
        )
        l103, l104, l105, l106, l107 = sch.split(
            loop=l66, factors=[v98, v99, v100, v101, v102], preserve_unit_iters=True
        )
        v108, v109, v110 = sch.sample_perfect_tile(
            loop=l67, n=3, max_innermost_factor=64, decision=[2, 2, 16]
        )
        l111, l112, l113 = sch.split(loop=l67, factors=[v108, v109, v110], preserve_unit_iters=True)
        sch.reorder(
            l73,
            l83,
            l93,
            l103,
            l74,
            l84,
            l94,
            l104,
            l75,
            l85,
            l95,
            l105,
            l111,
            l112,
            l76,
            l86,
            l96,
            l106,
            l113,
            l77,
            l87,
            l97,
            l107,
        )
        l114 = sch.fuse(l73, l83, l93, l103, preserve_unit_iters=True)
        sch.bind(loop=l114, thread_axis="blockIdx.x")
        l115 = sch.fuse(l74, l84, l94, l104, preserve_unit_iters=True)
        sch.bind(loop=l115, thread_axis="vthread.x")
        l116 = sch.fuse(l75, l85, l95, l105, preserve_unit_iters=True)
        sch.bind(loop=l116, thread_axis="threadIdx.x")
        sch.annotate(
            block_or_loop=b2, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=32
        )
        sch.annotate(
            block_or_loop=b2, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=1024
        )
        b117 = sch.cache_write(block=b2, write_buffer_index=0, storage_scope="local")
        sch.reverse_compute_at(block=b117, loop=l116, preserve_unit_loops=True, index=-1)
        b118 = sch.cache_read(
            block=b2, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b2]
        )
        sch.compute_at(block=b118, loop=l111, preserve_unit_loops=True, index=-1)
        l119, l120, l121, l122, l123, l124, l125, l126 = sch.get_loops(block=b118)
        l127 = sch.fuse(l123, l124, l125, l126, preserve_unit_iters=True)
        v128 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=3
        )
        sch.annotate(block_or_loop=b118, ann_key="meta_schedule.cooperative_fetch", ann_val=v128)
        b129 = sch.cache_read(
            block=b2, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b2]
        )
        sch.compute_at(block=b129, loop=l111, preserve_unit_loops=True, index=-1)
        l130, l131, l132, l133, l134, l135, l136, l137 = sch.get_loops(block=b129)
        l138 = sch.fuse(l134, l135, l136, l137, preserve_unit_iters=True)
        v139 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=3
        )
        sch.annotate(block_or_loop=b129, ann_key="meta_schedule.cooperative_fetch", ann_val=v139)
        # Inline the add+relu epilogue; pick the unroll setting for root.
        sch.reverse_compute_inline(block=b7)
        sch.reverse_compute_inline(block=b6)
        v140 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=4,
        )
        sch.annotate(block_or_loop=b8, ann_key="meta_schedule.unroll_explicit", ann_val=v140)
        # --- conv2d_winograd output: fuse all loops and thread-bind. ---
        l141, l142, l143, l144 = sch.get_loops(block=b5)
        l145 = sch.fuse(l141, l142, l143, l144, preserve_unit_iters=True)
        v146 = sch.sample_categorical(
            candidates=[32, 64, 128, 256, 512, 1024],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=2,
        )
        l147, l148 = sch.split(loop=l145, factors=[None, v146], preserve_unit_iters=True)
        sch.bind(loop=l147, thread_axis="blockIdx.x")
        sch.bind(loop=l148, thread_axis="threadIdx.x")
        # --- Postprocessing: concretize cooperative fetch, unroll pragmas,
        # then decompose the reductions. ---
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b118, ann_key="meta_schedule.cooperative_fetch")
        l149, l150, l151, l152, l153 = sch.get_loops(block=b118)
        l154, l155, l156 = sch.split(loop=l153, factors=[None, 48, 4], preserve_unit_iters=True)
        sch.vectorize(loop=l156)
        sch.bind(loop=l155, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b129, ann_key="meta_schedule.cooperative_fetch")
        l157, l158, l159, l160, l161 = sch.get_loops(block=b129)
        l162, l163, l164 = sch.split(loop=l161, factors=[None, 48, 4], preserve_unit_iters=True)
        sch.vectorize(loop=l164)
        sch.bind(loop=l163, thread_axis="threadIdx.x")
        b165 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b165, ann_key="meta_schedule.unroll_explicit")
        b166, b167, b168, b169, b170, b171, b172, b173 = sch.get_child_blocks(b165)
        l174, l175, l176, l177, l178, l179 = sch.get_loops(block=b166)
        sch.annotate(block_or_loop=l174, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l174, ann_key="pragma_unroll_explicit", ann_val=1)
        l180, l181, l182, l183, l184, l185 = sch.get_loops(block=b167)
        sch.annotate(block_or_loop=l180, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l180, ann_key="pragma_unroll_explicit", ann_val=1)
        l186, l187, l188, l189, l190, l191, l192 = sch.get_loops(block=b168)
        sch.annotate(block_or_loop=l186, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l186, ann_key="pragma_unroll_explicit", ann_val=1)
        l193, l194, l195, l196, l197, l198, l199 = sch.get_loops(block=b169)
        sch.annotate(block_or_loop=l193, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l193, ann_key="pragma_unroll_explicit", ann_val=1)
        (
            l200,
            l201,
            l202,
            l203,
            l204,
            l205,
            l206,
            l207,
            l208,
            l209,
            l210,
            l211,
            l212,
            l213,
        ) = sch.get_loops(block=b170)
        sch.annotate(block_or_loop=l200, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l200, ann_key="pragma_unroll_explicit", ann_val=1)
        l214, l215, l216, l217, l218, l219, l220 = sch.get_loops(block=b171)
        sch.annotate(block_or_loop=l214, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l214, ann_key="pragma_unroll_explicit", ann_val=1)
        l221, l222, l223, l224, l225, l226 = sch.get_loops(block=b172)
        sch.annotate(block_or_loop=l221, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l221, ann_key="pragma_unroll_explicit", ann_val=1)
        l227, l228 = sch.get_loops(block=b173)
        sch.annotate(block_or_loop=l227, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l227, ann_key="pragma_unroll_explicit", ann_val=1)
        # Split init from update for each reduction block.
        b229 = sch.get_block(name="data_pack", func_name="main")
        l230, l231, l232, l233, l234, l235 = sch.get_loops(block=b229)
        b236 = sch.decompose_reduction(block=b229, loop=l234)
        b237 = sch.get_block(name="bgemm", func_name="main")
        (
            l238,
            l239,
            l240,
            l241,
            l242,
            l243,
            l244,
            l245,
            l246,
            l247,
            l248,
            l249,
            l250,
            l251,
        ) = sch.get_loops(block=b237)
        b252 = sch.decompose_reduction(block=b237, loop=l241)
        b253 = sch.get_block(name="inverse", func_name="main")
        l254, l255, l256, l257, l258, l259 = sch.get_loops(block=b253)
        b260 = sch.decompose_reduction(block=b253, loop=l258)

    verify(
        Conv2dWinogradAddRelu,
        apply_trace,
        Conv2dWinogradAddResidualRelu,
        "cuda",
        Conv2dWinogradAddResidualRelu_scheduled,
    )
def test_inline_order():
def apply_trace(sch: Schedule) -> None:
b0 = sch.get_block(name="pad_temp", func_name="main")
b1 = sch.get_block(name="conv2d_nhwc", func_name="main")
b2 = sch.get_block(name="T_subtract", func_name="main")
b3 = sch.get_block(name="T_add", func_name="main")
b4 = sch.get_block(name="compute", func_name="main")
b5 = sch.get_block(name="T_add_1", func_name="main")
b6 = sch.get_block(name="compute_1", func_name="main")
b7 = sch.get_block(name="T_subtract_1", func_name="main")
b8 = sch.get_block(name="compute_2", func_name="main")
b9 = sch.get_block(name="root", func_name="main")
sch.annotate(block_or_loop=b1, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
b10 = sch.reindex(block=b1, buffer=("write", 0))
b11 = sch.reindex(block=b1, buffer=("read", 0))
b12 = sch.reindex(block=b1, buffer=("read" |
, 1))
sch.transform_layout(
block=b1,
buffer=("read", 0),
index_map=lambda nn, yy, xx, rc: (
(((nn * 3136) + (yy * 56)) + xx),
rc,
),
pad_value=None,
)
sch.transform_layout(
block=b1,
buffer=("read", 1),
index_map=lambda ff, ry, rx, rc: (
ry,
rx,
ff,
rc,
),
pad_value=None,
)
sch.transform_layout(
block=b1,
buffer=("write", 0),
index_map=lambda nn, yy, xx, ff: (
(((nn * 3136) + (yy * 56)) + xx),
ff,
),
pad_value=None,
)
sch.transform_block_layout(
block=b10,
index_map=lambda nn, yy, xx, ff: (
(((nn * 3136) + (yy * 56)) + xx),
ff,
),
)
sch.transform_block_layout(
block=b11,
index_map=lambda nn, yy, xx, rc: (
(((nn * 3136) + (yy * 56)) + xx),
rc,
),
)
sch.transform_block_layout(
block=b12,
index_map=lambda ff, ry, rx, rc: (
ry,
rx,
ff,
rc,
),
)
sch.transform_block_layout(
block=b1,
index_map=lambda nn, yy, xx, ff, ry, rx, rc: (
ry,
rx,
(((nn * 3136) + (yy * 56)) + xx),
ff,
rc,
),
)
l13, l14, l15, l16, l17 = sch.get_loops(block=b1)
l18, l19 = sch.split(loop=l17, factors=[None, 16], preserve_unit_iters=True)
l20, l21 = sch.split(loop=l16, factors=[None, 16], preserve_unit_iters=True)
l22, l23 = sch.split(loop=l15, factors=[None, 16], preserve_unit_iters=True)
l24, l25, l26, l27, l28, l29, l30, l31 = sch.get_loops(block=b1 |
)
sch.reorder(l28, l30, l23, l21, l19)
b32 = sch.blockize(loop=l23)
sch.annotate(
block_or_loop=b32,
ann_key="meta_schedule.auto_tensorize",
ann_val="wmma_sync_16x16x16_s8s8s32_trans",
)
sch.annotate(
block_or_loop=b32,
ann_key="meta_schedule.auto_tensorize_init",
ann_val="wmma_fill_16x16x16_s32",
)
sch.annotate(block_or_loop=b32, ann_key="warp_execution", ann_val=1)
l33, l34, l35, l36, l37 = sch.get_loops(block=b32)
v38, v39, v40 = sch.sample_perfect_tile(
loop=l33, n=3, max_innermost_factor=4, decision=[1, 1, 1]
)
l41, l42, l43 = sch.split(loop=l33, factors=[v38, v39, v40], preserve_unit_iters=True)
v44, v45, v46 = sch.sample_perfect_tile(
loop=l34, n=3, max_innermost_factor=4, decision=[1, 1, 1]
)
l47, l48, l49 = sch.split(loop=l34, factors=[v44, v45, v46], preserve_unit_iters=True)
v50, v51, v52, v53, v54 = sch.sample_perfect_tile(
loop=l35, n=5, max_innermost_factor=4, decision=[8, 196, 2, 1, 1]
)
l55, l56, l57, l58, l59 = sch.split(
loop=l35, factors=[v50, v51, v52, v53, v54], preserve_unit_iters=True
)
v60, v61, v62, v63, v64 = sch.sample_perfect_tile(
loop=l36, n=5, max_innermost_factor=4, decision=[4, 1, 2, 1, 2]
)
l65, l66, l67, l68, l69 = sch.split(
loop=l36, factors=[v60, v61, v62, v63, v64], preserve_unit_iters=True
)
v70, v71, v72 = sch.sample_perfect_tile(
loop=l37, n=3, max_innermost_factor=4, decision=[2, 2, 1]
)
l73, l74, l75 = sch.split(loop=l37, factors=[v70, v71, v72], preserve_unit_iters=True)
sch.reorder(
l55,
l65,
l56,
l66,
l57,
l67,
l41,
l47,
l73,
l42,
l48,
l74,
l58, |
l68,
l43,
l49,
l75,
l59,
l69,
)
l76 = sch.fuse(l55, l65, preserve_unit_iters=True)
sch.bind(loop=l76, thread_axis="blockIdx.y")
l77 = sch.fuse(l56, l66, preserve_unit_iters=True)
sch.bind(loop=l77, thread_axis="blockIdx.x")
l78 = sch.fuse(l57, l67, preserve_unit_iters=True)
sch.bind(loop=l78, thread_axis="threadIdx.y")
sch.annotate(
block_or_loop=b32, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=32
)
sch.annotate(
block_or_loop=b32, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=1024
)
b79 = sch.cache_write(block=b32, write_buffer_index=0, storage_scope="shared")
sch.reverse_compute_at(block=b79, loop=l77, preserve_unit_loops=True, index=-1)
b80 = sch.cache_write(block=b32, write_buffer_index=0, storage_scope="wmma.accumulator")
sch.reverse_compute_at(block=b80, loop=l78, preserve_unit_loops=True, index=-1)
v81 = sch.sample_categorical(
candidates=[1, 2, 3, 4, 8, 16],
probs=[
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
],
decision=1,
)
sch.annotate(block_or_loop=b79, ann_key="meta_schedule.cooperative_fetch", ann_val=v81)
sch.reverse_compute_inline(block=b10)
l82, l83, l84, l85, l86 = sch.get_loops(block=b80)
l87, l88 = sch.split(loop=l86, factors=[None, 16], preserve_unit_iters=True)
l89, l90 = sch.split(loop=l85, factors=[None, 16], preserve_unit_iters=True)
l91, l92, l93, l94, l95, l96, l97 = sch.get_loops(block=b80)
sch.reorder(l96, l90, l88)
b98 = sch.blockize(loop=l90)
sch.annotate(
block_or_loop=b98,
ann_key="meta_schedule.auto_tensorize", |
ann_val="wmma_store_16x16x16_s32_shared",
)
b99 = sch.cache_read(
block=b32, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b32]
)
sch.compute_at(block=b99, loop=l73, preserve_unit_loops=True, index=-1)
l100, l101, l102, l103, l104, l105, l106, l107 = sch.get_loops(block=b99)
l108 = sch.fuse(l106, l107, preserve_unit_iters=True)
v109 = sch.sample_categorical(
candidates=[1, 2, 3, 4, 8, 16],
probs=[
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
],
decision=3,
)
sch.annotate(block_or_loop=b99, ann_key="meta_schedule.cooperative_fetch", ann_val=v109)
b110 = sch.cache_read(
block=b32, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b32]
)
sch.compute_at(block=b110, loop=l73, preserve_unit_loops=True, index=-1)
l111, l112, l113, l114, l115, l116, l117, l118, l119, l120 = sch.get_loops(block=b110)
l121 = sch.fuse(l117, l118, l119, l120, preserve_unit_iters=True)
v122 = sch.sample_categorical(
candidates=[1, 2, 3, 4, 8, 16],
probs=[
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
],
decision=2,
)
sch.annotate(block_or_loop=b110, ann_key="meta_schedule.cooperative_fetch", ann_val=v122)
b123 = sch.cache_read(block=b32, read_buffer_index=0, storage_scope="wmma.matrix_a")
sch.compute_at(block=b123, loop=l74, preserve_unit_loops=True, index=-1)
l124, l125, l126, l127, l128, l129, l130, l131, l132, l133, l134 = sch.get_loops(block=b123)
l135, l136 = sch.split(loo |
p=l134, factors=[None, 16], preserve_unit_iters=True)
l137, l138 = sch.split(loop=l133, factors=[None, 16], preserve_unit_iters=True)
(
l139,
l140,
l141,
l142,
l143,
l144,
l145,
l146,
l147,
l148,
l149,
l150,
l151,
) = sch.get_loops(block=b123)
sch.reorder(l150, l138, l136)
b152 = sch.blockize(loop=l138)
sch.annotate(
block_or_loop=b152,
ann_key="meta_schedule.auto_tensorize",
ann_val="wmma_load_16x16x16_s8_a",
)
b153 = sch.cache_read(block=b32, read_buffer_index=1, storage_scope="wmma.matrix_b")
sch.compute_at(block=b153, loop=l74, preserve_unit_loops=True, index=-1)
(
l154,
l155,
l156,
l157,
l158,
l159,
l160,
l161,
l162,
l163,
l164,
l165,
l166,
) = sch.get_loops(block=b153)
l167, l168 = sch.split(loop=l166, factors=[None, 16], preserve_unit_iters=True)
l169, l170 = sch.split(loop=l165, factors=[None, 16], preserve_unit_iters=True)
(
l171,
l172,
l173,
l174,
l175,
l176,
l177,
l178,
l179,
l180,
l181,
l182,
l183,
l184,
l185,
) = sch.get_loops(block=b153)
sch.reorder(l184, l170, l168)
b186 = sch.blockize(loop=l170)
sch.annotate(
block_or_loop=b186,
ann_key="meta_schedule.auto_tensorize",
ann_val="wmma_load_16x16x16_s8_b_trans",
)
sch.compute_inline(block=b11)
sch.compute_inline(block=b12)
sch.storage_align(block=b99, buffer_index=0, axis=-2, factor=32, offset=16)
sch.storage_align(bl |
ock=b110, buffer_index=0, axis=-2, factor=32, offset=16)
sch.reverse_compute_inline(block=b8)
sch.reverse_compute_inline(block=b7)
sch.reverse_compute_inline(block=b6)
sch.reverse_compute_inline(block=b5)
sch.reverse_compute_inline(block=b4)
sch.reverse_compute_inline(block=b3)
sch.reverse_compute_inline(block=b2)
sch.compute_inline(block=b0)
v187 = sch.sample_categorical(
candidates=[0, 16, 64, 512, 1024],
probs=[
0.20000000000000001,
0.20000000000000001,
0.20000000000000001,
0.20000000000000001,
0.20000000000000001,
],
decision=4,
)
sch.annotate(block_or_loop=b9, ann_key="meta_schedule.unroll_explicit", ann_val=v187)
sch.enter_postproc()
sch.unannotate(block_or_loop=b79, ann_key="meta_schedule.cooperative_fetch")
l188, l189, l190, l191 = sch.get_loops(block=b79)
l192, l193, l194, l195 = sch.split(
loop=l191, factors=[None, 4, 32, 2], preserve_unit_iters=True
)
verify(
Conv2dInt8_with_predicate,
apply_trace,
Conv2dInt8_with_predicate_target,
"cuda",
Conv2dInt8_with_predicate_scheduled,
)
if __name__ == "__main__":
tvm.testing.main() |
"""Test the tune context of meta schedule.""" |
import sys |
import pytest |
import tvm |
import tvm.testing
from tvm.script |
import tir as T
from tvm.target |
import Target
from tvm.meta_schedule |
import TuneContext
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def test_tune_context_create():
mod = Matmul
context = TuneContext(mod=mod, target=Target("llvm"), task_name="Test Task")
assert context.num_threads > 0
assert context.rand_state != -1
assert context.task_name == "Test Task"
assert context.mod == mod or tvm.ir.structural_equal(context.mod, mod)
if __name__ == "__main__":
tvm.testing.main() |
import logging |
import tempfile |
import numpy as np |
import pytest |
import tvm |
import tvm.testing
from tvm |
import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner |
import run_module_via_rpc
from tvm.meta_schedule.testing.local_rpc |
import LocalRPC
from tvm.script |
import tir as T
from tvm.target |
import Target
from tvm.tir.schedule |
import BlockRV, Schedule
logging.basicConfig()
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def two_step(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.alloc_buffer((1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j in T.grid(1024, 1024):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(1024, 1024):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 3.0
@tvm.testing.requires_llvm
def test_tune_matmul_cpu():
with tempfile.TemporaryDirectory() as work_dir:
target = Target("llvm --num-cores=16")
database = ms.tir_integration.tune_tir(
mod=matmul,
target=target,
work_dir=work_dir,
max_trials_global=32,
num_trials_per_iter=16,
)
sch = ms.tir_integration.compile_tir(database, matmul, target)
if sch is None:
print("No valid schedule found!")
else:
sch.mod.show()
sch.trace.show()
@tvm.testing.requires_cuda
def test_tune_matmul_cuda():
with tempfile.TemporaryDirectory() as work_dir:
target = Target("nvidia/geforce-rtx-3070")
database = ms.tir_integration.tune_tir(
mod=matmul,
target=target,
work_dir=work_dir,
max_trials_global=32,
num_trials_per_iter=16,
)
sch = ms.tir_integration.compil |
e_tir(database, matmul, target)
if sch is None:
print("No valid schedule found!")
else:
sch.mod.show()
sch.trace.show()
def test_tune_run_module_via_rpc():
target = tvm.target.Target("llvm")
rt_mod = tvm.build(matmul, target)
input_data = {}
input_shape = (128, 128)
input_dtype = "float32"
a_np = np.random.uniform(size=input_shape).astype(input_dtype)
b_np = np.random.uniform(size=input_shape).astype(input_dtype)
c_np = np.zeros(input_shape).astype(input_dtype)
for i in range(128):
for j in range(128):
for k in range(128):
c_np[i, j] = c_np[i, j] + a_np[i, k] * b_np[j, k]
input_data["a"] = a_np
input_data["b"] = b_np
input_data["c"] = np.zeros(input_shape).astype(input_dtype)
with LocalRPC() as rpc:
rpc_config = ms.runner.RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
def f_timer(rt_mod, dev, input_data):
rt_mod(input_data["a"], input_data["b"], input_data["c"])
return input_data["c"]
result = run_module_via_rpc(
rpc_config=rpc_config,
lib=rt_mod,
dev_type=target.kind.name,
args=input_data,
continuation=f_timer,
)
tvm.testing.assert_allclose(result.numpy(), c_np, rtol=1e-3)
def test_tune_block_cpu():
@ms.derived_object |
class RemoveBlock(ms.schedule_rule.PyScheduleRule):
def _initialize_with_tune_context(self, context: ms.TuneContext) -> None:
pass
def apply(self, sch: Schedule, block: BlockRV):
if sch.get(block).name_hint == "root":
return [sch]
sch = sch.copy()
sch.compute_inline(block)
return [sch]
def clone(self) -> "RemoveBlock":
return RemoveBlock()
with tempfile.TemporaryDirectory() as work_dir:
target = Target("llvm --num-cores=16")
database = ms.tir_integration.tune_tir(
mod=two_step,
target=target,
work_dir=work_dir,
max_trials_global=32,
num_trials_per_iter=16,
space=ms.space_generator.PostOrderApply(
f_block_filter=lambda block: block.name_hint == "A",
sch_rules=[RemoveBlock()],
postprocs=[],
mutator_probs={},
),
)
sch = ms.tir_integration.compile_tir(database, two_step, target)
assert sch is not None
sch.mod.show()
sch.trace.show()
if __name__ == """__main__""":
test_tune_matmul_cpu()
test_tune_matmul_cuda()
test_tune_run_module_via_rpc()
test_tune_block_cpu() |
import logging |
import tempfile
from typing |
import Optional |
import numpy as np |
import tvm |
import tvm.testing
from tvm |
import meta_schedule as ms
from tvm |
import relay
from tvm._ffi |
import register_func
from tvm.tir.schedule |
import BlockRV, Schedule
from tvm.tir.schedule.analysis |
import has_block
from tvm.tir.tensor_intrin.x86 |
import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
def _schedule_dense(m: Optional[int], do_tune: bool):
"""Manually schedule a dense block, created from TE compute op via CreatePrimFunc,
using VNNI instruction.
"""
def schedule_fn(sch, dense_block: Optional[BlockRV] = None) -> bool:
if sch.mod.attrs is not None and "dense" not in sch.mod.attrs["task_name"]:
return False
if dense_block is None:
assert has_block(sch, "compute")
dense_block = sch.get_block("compute")
assert "dense_vnni" in sch.get(dense_block).annotations["schedule_rule"]
post_blocks = sch.get_consumers(dense_block)
if len(post_blocks) > 0:
while True:
next_post_blocks = []
for post_block in post_blocks:
next_consumers = sch.get_consumers(post_block)
if len(next_consumers) > 0:
sch.compute_inline(post_block)
next_post_blocks += next_consumers
if len(next_post_blocks) == 0:
assert len(post_blocks) == 1
outer_block = post_blocks[0]
a_y, a_x = sch.get_loops(outer_block)[-2:]
break
post_blocks = next_post_blocks
else:
a_y, a_x, _ = sch.get_loops(dense_block)[-3:]
outer_block = dense_block
if do_tune:
y_factors = sch.sample_perfect_tile(a_y, n=2, max_innermost_factor=128)
a_yo, a_yi = sch.split(a_y, factors=y_factors)
else:
a_yo, a_yi = sch.split(a_y, factors=[None, min(m, 64)])
a_xo, a_xi = sch.split(a_x, factors=[None, 16])
sch.reorder(a_yo, a_xo, a_yi, a_xi)
fused = sch.fuse(a_yo, a_xo)
if outer_block != |
dense_block:
sch.vectorize(a_xi)
sch.compute_at(dense_block, a_yi)
a_xi, a_k = sch.get_loops(dense_block)[-2:]
a_ko, a_ki = sch.split(a_k, factors=[None, 4])
sch.reorder(a_ko, a_xi, a_ki)
sch.parallel(fused)
dec = sch.decompose_reduction(dense_block, a_ko)
init_loop = sch.get_loops(dec)[-1]
sch.vectorize(init_loop)
sch.tensorize(a_xi, VNNI_INTRIN)
return True
return schedule_fn
def _relay_dense(m, n, k):
data = relay.var("data", shape=(m, k), dtype="uint8")
weight = relay.var("weight", shape=(n, k), dtype="int8")
bias = relay.var("bias", shape=(n,), dtype="int32")
dense = relay.nn.dense(data, weight, out_dtype="int32")
bias_add = relay.nn.bias_add(dense, bias) + relay.const(1, dtype="int32")
out = relay.nn.batch_matmul(
relay.cast(relay.expand_dims(bias_add, 0), "uint8"),
relay.cast(relay.expand_dims(bias_add, 0), "int8"),
out_dtype="int32",
)
relay_mod = tvm.IRModule.from_expr(out)
data = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
params = {
"weight": np.random.uniform(1, 10, size=(n, k)).astype("int8"),
"bias": np.random.uniform(1, 10, size=(n,)).astype("int32"),
}
def f_check(lib, dev):
ref = (
relay.create_executor(
"vm",
mod=relay_mod,
device=dev,
target="llvm",
)
.evaluate()(data, params["weight"], params["bias"])
.numpy()
)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data)
runtime.run()
out = runtime.get_output(0).numpy()
np.testing.assert_equal(out, ref)
return relay_mod, params, f_check
@tvm.testing.requires_cascadelake
def test_vnni_schedule_fn_database():
m, n, k = 1024, 1024, 1024
target = tvm.target.Target("llvm -mcpu=cascadelake -num- |
cores 4")
dev = tvm.cpu(0)
relay_mod, params, f_check = _relay_dense(m, n, k)
with ms.database.ScheduleFnDatabase(
_schedule_dense(
m=m,
do_tune=False,
)
), tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_meta_schedule": True},
):
"""The log should say
Warning: Cannot find workload: tvmgen_default_fused_expand_dims
Warning: Cannot find workload: tvmgen_default_fused_cast
Warning: Cannot find workload: tvmgen_default_fused_cast_1
Warning: Cannot find workload: tvmgen_default_fused_nn_batch_matmul
This means batch matmul and others are scheduled by TE, and dense (the one not warned)
is found in the meta schedule tuning database during compilation
"""
lib = relay.build(relay_mod, target=target, params=params)
f_check(lib, dev)
@tvm.testing.requires_cascadelake
def test_vnni_schedule_fn_tune():
"""
We can inject and apply a custom TIR scheduling to a TE compute of interest, using
the "schedule_rule" annotation. For example, in topi/x86/dense.py we have the following
declaration for int8 dense targeting the VNNI instruction.
C = te.compute(
...
attrs={"schedule_rule": "meta_schedule.x86.dense_vnni"},
)
When the MetaSchedule encounters a TensorIR block with the "schedule_rule" annotation,
it looks up the packed func registry for a function that is associated with the given schedule
rule key ("meta_schedule.x86.dense_vnni" in this example). The signature of such custom
schedule functions must be
(tir.schedule.Schedule, tir.schedule.BlockRV) -> [tir.schedule.Schedule].
The BlockRV argument corresponds to the TE compute annotated with "schedule_rule".
The relevant code is in `src/meta_schedule/space_generator/apply_custom_rule.cc`.
"""
def schedule_rule_dense_vnni(sch: Schedule, dense_block: BlockRV):
_schedule_dense(m=None, do_tune=Tru |
e)(sch, dense_block)
return [sch]
register_func("meta_schedule.x86.dense_vnni", schedule_rule_dense_vnni)
m, n, k = 1024, 1024, 1024
target = tvm.target.Target("llvm -keys=x86,cpu -mcpu=cascadelake -num-cores=4")
dev = tvm.cpu(0)
relay_mod, params, f_check = _relay_dense(m, n, k)
extracted_tasks = ms.relay_integration.extract_tasks(relay_mod, target, params)
with tempfile.TemporaryDirectory() as work_dir:
tasks, weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
list(
filter(
lambda task: "dense" in task.task_name,
extracted_tasks,
)
),
work_dir=work_dir,
space=ms.space_generator.PostOrderApply(
f_block_filter=None,
sch_rules="from-target",
postprocs=[],
mutator_probs="from-target",
),
)
database = ms.relay_integration.tune_tasks(
tasks=tasks,
task_weights=weights,
work_dir=work_dir,
max_trials_per_task=32,
max_trials_global=20000,
)
with database, tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_meta_schedule": True},
):
"""The log should say
Warning: Cannot find workload: tvmgen_default_fused_expand_dims
Warning: Cannot find workload: tvmgen_default_fused_cast
Warning: Cannot find workload: tvmgen_default_fused_cast_1
Warning: Cannot find workload: tvmgen_default_fused_nn_batch_matmul
This means batch matmul and others are scheduled by TE, and dense (the one not warned)
is found in the meta schedule tuning database during compilation
"""
lib = relay.build(relay_mod, target=target, params=params)
f_check(lib, dev)
if __name__ == """__main__""":
test_vnni_schedule_fn_database()
test_vnni_schedule_fn_tune() |
import pathlib |
import sys |
import datetime |
import json |
import os |
import tarfile |
import numpy as np |
import pytest |
import platform
pytest.importorskip("tvm.micro") |
import tvm |
import tvm.relay
from tvm.relay.backend |
import Executor, Runtime
from tvm.relay.testing |
import byoc |
import tvm.runtime.module |
import tvm.testing
from tvm.contrib |
import utils |
import tvm.micro as micro
from tvm.micro.testing.utils |
import get_conv2d_relay_module |
import tvm.micro.model_library_format as model_library_format
from tvm.micro.model_library_format |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.