with T.block("compute"):
nn = T.axis.spatial(1, 0)
ff = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused
yy = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused
xx = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + i3_4)
rc = T.axis.reduce(512, i4_1 * 16 + i4_2)
ry, rx = T.axis.remap("RR", [i5_0, i6_2])
with T.init():
compute_local[nn, ff, yy, xx] = T.float32(0)
compute_local[nn, ff, yy, xx] = compute_local[nn, ff, yy, xx] + pad_temp_shared[nn, rc, yy + ry, xx + rx] * W_shared[ff, rc, ry, rx]
for ax0, ax1, ax2, ax3 in T.grid(1, 8, 2, 28):
with T.block("compute_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused
v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 14
v3 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + ax3)
compute_1[v0, v1, v2, v3] = compute_local[v0, v1, v2, v3]
for i0, i1, i2, i3 in T.grid(1, 512, 56, 56):
with T.block("compute_1"):
i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
compute[i0_2, i1_2, i2_2, i3_2] = T.max((compute_1[i0_2, i1_2, i2_2, i3_2] + B[i1_2, 0, 0]) * bn_scale[i1_2, 0, 0] + bn_offset[i1_2, 0, 0], T.float32(0))
@tvm.script.ir_module
class MultiLevelTiledConv2DAfterInline:
@T.prim_func
def main(X: T.Buffer[(1, 512, 56, 56), "float32"], W: T.Buffer[(512, 512, 3, 3), "float32"], B: T.Buffer[(512, 1, 1), "float32"], bn_scale: T.Buffer[(512, 1, 1), "float32"], bn_offset: T.Buffer[(512, 1, 1), "float32"], compute: T.Buffer[(1, 512, 56, 56), "float32"]) -> None:
compute_local = T.alloc_buffer([1, 512, 56, 56], dtype="float32", scope="local")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(224, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(8, thread="threadIdx.x"):
for i4_0, i5_0, i6_0, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 3, 1, 32, 1, 1, 1, 1, 1, 1, 16, 1, 3, 1, 8, 2, 28):
with T.block("compute"):
nn = T.axis.spatial(1, 0)
ff = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused
yy = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused
xx = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + i3_4)
rc = T.axis.reduce(512, i4_1 * 16 + i4_2)
ry, rx = T.axis.remap("RR", [i5_0, i6_2])
with T.init():
compute_local[nn, ff, yy, xx] = T.float32(0)
compute_local[nn, ff, yy, xx] = compute_local[nn, ff, yy, xx] + T.if_then_else(yy + ry >= 1 and yy + ry < 57 and xx + rx >= 1 and xx + rx < 57, X[nn, rc, yy + ry - 1, xx + rx - 1], T.float32(0), dtype="float32") * W[ff, rc, ry, rx]
for ax0, ax1, ax2, ax3 in T.grid(1, 8, 2, 28):
with T.block("compute_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused
v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 14
v3 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + ax3)
compute[v0, v1, v2, v3] = T.max((compute_local[v0, v1, v2, v3] + B[v1, 0, 0]) * bn_scale[v1, 0, 0] + bn_offset[v1, 0, 0], T.float32(0))
@tvm.script.ir_module
class SoftmaxBeforeInline:
@T.prim_func
def main(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
with T.init():
T_softmax_maxelem[i0_1] = T.min_value("float32")
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_exp"):
i0_2, i1_1 = T.axis.remap("SS", [i0, i1])
T_softmax_exp[i0_2, i1_1] = T.exp(A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32")
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_4, k = T.axis.remap("SR", [i0_3, i1])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k]
for i0_5, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
@tvm.script.ir_module
class SoftmaxAfterInline:
@T.prim_func
def main(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
with T.init():
T_softmax_maxelem[i0_1] = T.min_value("float32")
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_2, k = T.axis.remap("SR", [i0, i1])
with T.init():
T_softmax_expsum[i0_2] = T.float32(0)
T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32")
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
T_softmax_norm[i0_4, i1_1] = T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32") / T_softmax_expsum[i0_4]
@tvm.script.ir_module
class BeforePureSpatial:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 384), "int64"],
placeholder_1: T.Buffer[(30522, 768), "float32"],
placeholder_2: T.Buffer[(1, 384, 768), "float32"],
T_add: T.Buffer[(1, 384, 768), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
compile_engine_const = T.alloc_buffer([], dtype="int64")
T_less = T.alloc_buffer([1, 384], dtype="bool")
compile_engine_const_1 = T.alloc_buffer([], dtype="int64")
T_add_1 = T.alloc_buffer([1, 384], dtype="int64")
T_where = T.alloc_buffer([1, 384], dtype="int64")
T_take = T.alloc_buffer([1, 384, 768], dtype="float32")
with T.block("compile_engine_const"):
vi = T.axis.spatial(1, 0)
T.reads()
T.writes(compile_engine_const[()])
compile_engine_const[()] = T.int64(0)
for i0, i1 in T.grid(1, 384):
with T.block("T_less"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(placeholder[ax0, ax1], compile_engine_const[()])
T.writes(T_less[ax0, ax1])
T_less[ax0, ax1] = placeholder[ax0, ax1] < compile_engine_const[()]
with T.block("compile_engine_const_1"):
vi = T.axis.spatial(1, 0)
T.reads()
T.writes(compile_engine_const_1[()])
compile_engine_const_1[()] = T.int64(30522)
for i0, i1 in T.grid(1, 384):
with T.block("T_add"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(placeholder[ax0, ax1], compile_engine_const_1[()])
T.writes(T_add_1[ax0, ax1])
T_add_1[ax0, ax1] = placeholder[ax0, ax1] + compile_engine_const_1[()]
for i0, i1 in T.grid(1, 384):
with T.block("T_where"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_less[ax0, ax1], T_add_1[ax0, ax1], placeholder[ax0, ax1])
T.writes(T_where[ax0, ax1])
T_where[ax0, ax1] = T.Select(
T.cast(T_less[ax0, ax1], "int32") != 0, T_add_1[ax0, ax1], placeholder[ax0, ax1]
)
for i0, i1, i2 in T.grid(1, 384, 768):
with T.block("T_take"):
ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
T.reads(
placeholder_1[T.min(T.max(T.int64(0), T_where[ax0, ax1]), T.int64(30521)), ax2],
T_where[ax0, ax1],
)
T.writes(T_take[ax0, ax1, ax2])
T_take[ax0, ax1, ax2] = placeholder_1[
T.min(T.max(T.int64(0), T_where[ax0, ax1]), T.int64(30521)), ax2
]
for i0, i1, i2 in T.grid(1, 384, 768):
with T.block("T_add_1"):
ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
T.reads(T_take[ax0, ax1, ax2], placeholder_2[ax0, ax1, ax2])
T.writes(T_add[ax0, ax1, ax2])
T_add[ax0, ax1, ax2] = T_take[ax0, ax1, ax2] + placeholder_2[ax0, ax1, ax2]
@tvm.script.ir_module
class AfterPureSpatial:
@T.prim_func
def main(placeholder: T.Buffer[(1, 384), "int64"], placeholder_1: T.Buffer[(30522, 768), "float32"], placeholder_2: T.Buffer[(1, 384, 768), "float32"], T_add: T.Buffer[(1, 384, 768), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2 in T.grid(1, 384, 768):
with T.block("T_add_1"):
ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
T.reads(placeholder[ax0, ax1], placeholder_1[T.min(T.max(T.int64(0), placeholder[ax0, ax1]), T.int64(30521)) : T.min(T.max(T.int64(0), placeholder[ax0, ax1] + T.int64(30522)), T.int64(30521)) + T.int64(1), ax2], placeholder_2[ax0, ax1, ax2])
T.writes(T_add[ax0, ax1, ax2])
T_add[ax0, ax1, ax2] = placeholder_1[T.min(T.max(T.int64(0), T.Select(T.cast(placeholder[ax0, ax1] < T.int64(0), "int32") != 0, placeholder[ax0, ax1] + T.int64(30522), placeholder[ax0, ax1])), T.int64(30521)), ax2] + placeholder_2[ax0, ax1, ax2]
@tvm.script.ir_module
class ConstConsumer:
@T.prim_func
def main(T_full: T.Buffer[(1, 12, 4096), "int64"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2 in T.grid(1, 12, 4096):
with T.block("T_full"):
ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
T.reads()
T.writes(T_full[ax0, ax1, ax2])
T_full[ax0, ax1, ax2] = T.int64(0)
@tvm.script.ir_module
class Conv2dInt8:
@T.prim_func
def main(p0: T.Buffer[(16, 14, 14, 256), "int8"], p1: T.Buffer[(1024, 1, 1, 256), "int8"], p2: T.Buffer[(1, 1, 1, 1024), "int32"], p3: T.Buffer[(1, 1, 1, 1024), "int32"], p4: T.Buffer[1024, "int32"], p5: T.Buffer[1024, "int32"], p6: T.Buffer[1024, "int32"], p7: T.Buffer[1, "int32"], p8: T.Buffer[(16, 14, 14, 1024), "int32"], compute: T.Buffer[(16, 14, 14, 1024), "int32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
compile_engine_const = T.alloc_buffer([], dtype="int32")
pad_temp = T.alloc_buffer([16, 14, 14, 256], dtype="int8")
conv2d_nhwc = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
T_subtract = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
T_add = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
compute_1 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
T_add_1 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
compute_2 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
T_subtract_1 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
compute_3 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
T_add_2 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32")
with T.block("compile_engine_const"):
vi = T.axis.spatial(1, 0)
T.reads()
T.writes(compile_engine_const[()])
compile_engine_const[()] = 59
for i0, i1, i2, i3 in T.grid(16, 14, 14, 256):
with T.block("pad_temp"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(p0[i0_1, i1_1, i2_1, i3_1])
T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 14, 14, 1024, 1, 1, 256):
with T.block("conv2d_nhwc"):
nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
T.writes(conv2d_nhwc[nn, yy, xx, ff])
with T.init():
conv2d_nhwc[nn, yy, xx, ff] = 0
conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
for i0, i1, i2, i3 in T.grid(16, 14, 14, 1024):
with T.block("T_subtract"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
T.writes(T_subtract[ax0, ax1, ax2, ax3])
T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
for i0, i1, i2, i3 in T.grid(16, 14, 14, 1024):
with T.block("T_add"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
T.writes(T_add[ax0, ax1, ax2, ax3])
T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
for i0, i1, i2, i3 in T.grid(16, 14, 14, 1024):
with T.block("compute"):
i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2])
T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
compute_1[i0_2, i1_2, i2_2, i3_2] = T.q_multiply_shift_per_axis(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2], 31, False, True, dtype="int32")
for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 14, 14, 1024):
with T.block("T_add_1"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
T.reads(compile_engine_const[()], compute_1[ax0, ax1, ax2, ax3])
T.writes(T_add_1[ax0, ax1, ax2, ax3])
T_add_1[ax0, ax1, ax2, ax3] = compile_engine_const[()] + compute_1[ax0, ax1, ax2, ax3]
for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 14, 14, 1024):
with T.block("compute_1"):
i0_5, i1_5, i2_5, i3_5 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
T.reads(T_add_1[i0_5, i1_5, i2_5, i3_5])
T.writes(compute_2[i0_5, i1_5, i2_5, i3_5])
compute_2[i0_5, i1_5, i2_5, i3_5] = T.max(T.min(T_add_1[i0_5, i1_5, i2_5, i3_5], 255), 0)
for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 14, 14, 1024):
with T.block("T_subtract_1"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
T.reads(compute_2[ax0, ax1, ax2, ax3], p7[0])
T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
T_subtract_1[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] - p7[0]
for i0_7, i1_7, i2_7, i3_7 in T.grid(16, 14, 14, 1024):
with T.block("compute_2"):
i0_8, i1_8, i2_8, i3_8 = T.axis.remap("SSSS", [i0_7, i1_7, i2_7, i3_7])
T.reads(T_subtract_1[i0_8, i1_8, i2_8, i3_8])
T.writes(compute_3[i0_8, i1_8, i2_8, i3_8])
compute_3[i0_8, i1_8, i2_8, i3_8] = T.q_multiply_shift(T_subtract_1[i0_8, i1_8, i2_8, i3_8], 1408572815, 31, 1, dtype="int32")
for i0_9, i1_9, i2_9, i3_9 in T.grid(16, 14, 14, 1024):
with T.block("T_add_2"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_9, i1_9, i2_9, i3_9])
T.reads(compute_3[ax0, ax1, ax2, ax3], p8[ax0, ax1, ax2, ax3])
T.writes(T_add_2[ax0, ax1, ax2, ax3])
T_add_2[ax0, ax1, ax2, ax3] = compute_3[ax0, ax1, ax2, ax3] + p8[ax0, ax1, ax2, ax3]
for i0_10, i1_10, i2_10, i3_10 in T.grid(16, 14, 14, 1024):
with T.block("compute_3"):
i0_11, i1_11, i2_11, i3_11 = T.axis.remap("SSSS", [i0_10, i1_10, i2_10, i3_10])
T.reads(T_add_2[i0_11, i1_11, i2_11, i3_11])
T.writes(compute[i0_11, i1_11, i2_11, i3_11])
compute[i0_11, i1_11, i2_11, i3_11] = T.max(T.min(T_add_2[i0_11, i1_11, i2_11, i3_11], 255), 0)
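# The tests below exercise the ms.schedule_rule.AutoInline schedule rule: it
# visits every block in the PrimFunc and inlines pure elementwise producers
# and consumers (bias add, batch-norm scale/offset, ReLU, padding, ...) into
# the anchor compute block, so later tiling rules see a single fused workload.
# A rough manual equivalent for one case above (a sketch, not how the rule is
# implemented; block and class names are taken from the fixtures in this file):
#
#     sch = tvm.tir.Schedule(SoftmaxBeforeInline)
#     sch.compute_inline(sch.get_block("T_softmax_exp"))
#     tvm.ir.assert_structural_equal(sch.mod, SoftmaxAfterInline)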
def test_inline_consumer_chain():
mod = Conv2DBiasBnReLU
target = Target("llvm")
(space,) = generate_design_space(
kind="llvm",
mod=mod,
target=target,
types=ms.schedule_rule.AutoInline,
)
tvm.ir.assert_structural_equal(lhs=space.mod, rhs=Conv2DBiasBnReLUInlined)
def test_inline_into_cache():
mod = MultiLevelTiledConv2D
target = Target("cuda", host="llvm")
(space,) = generate_design_space(
kind="cuda",
mod=mod,
target=target,
types=ms.schedule_rule.AutoInline,
)
tvm.ir.assert_structural_equal(lhs=space.mod, rhs=MultiLevelTiledConv2DAfterInline)
def test_inline_into_multiple_consumers():
mod = SoftmaxBeforeInline
target = Target("cuda", host="llvm")
(space,) = generate_design_space(
kind="cuda",
mod=mod,
target=target,
types=ms.schedule_rule.AutoInline,
)
tvm.ir.assert_structural_equal(lhs=space.mod, rhs=SoftmaxAfterInline)
def test_inline_pure_spatial():
mod = BeforePureSpatial
target = Target("llvm")
(space,) = generate_design_space(
kind="llvm",
mod=mod,
target=target,
types=ms.schedule_rule.AutoInline,
)
tvm.ir.assert_structural_equal(lhs=space.mod, rhs=AfterPureSpatial)
def test_inline_constant_tensor():
mod = ConstConsumer
target = Target("cuda", host="llvm")
(space,) = generate_design_space(
kind="cuda",
mod=mod,
target=target,
types=ms.schedule_rule.AutoInline,
)
tvm.ir.assert_structural_equal(lhs=space.mod, rhs=ConstConsumer)
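# reverse_compute_inline requires the target block to read exactly one buffer
# region. In Conv2dInt8 the "T_add_1" block also reads the scalar written by
# the "compile_engine_const" block, so the first attempt below raises a
# ScheduleError; inlining that constant-scalar block first (what the
# InlineConstantScalars rule does) makes the reverse inline legal.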
def test_conv2d_int8_inline_constant_scalars():
sch = Schedule(Conv2dInt8)
conv2d = sch.get_block("conv2d_nhwc")
sch.cache_write(conv2d, 0, "shared")
with pytest.raises(tvm.tir.ScheduleError) as e:
sch.reverse_compute_inline(sch.get_block("T_add_1"))
err_msg = "The block is only allowed to read a single buffer region, but it reads 2 region(s)"
assert err_msg in str(e)
ms.schedule_rule.InlineConstantScalars().apply(sch, sch.get_block("compile_engine_const"))
sch.reverse_compute_inline(sch.get_block("T_add_1"))
if __name__ == "__main__":
test_inline_consumer_chain()
test_inline_into_cache()
test_inline_into_multiple_consumers()
test_inline_pure_spatial()
test_inline_constant_tensor()
test_conv2d_int8_inline_constant_scalars()
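# ---- Tests for the CrossThreadReduction schedule rule ----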
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import check_sketches, generate_design_space
from tvm.script import tir as T
from tvm.target import Target
from tvm.te import create_prim_func
@tvm.script.ir_module
class Softmax_mn_after_inline:
@T.prim_func
def main(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
with T.init():
T_softmax_maxelem[i0_1] = T.min_value("float32")
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_2, k = T.axis.remap("SR", [i0, i1])
with T.init():
T_softmax_expsum[i0_2] = T.float32(0)
T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1] = (
T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum[i0_4]
)
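# The tests below exercise ms.schedule_rule.CrossThreadReduction: for each
# reduction block the rule may fuse the reduction loops, split off an inner
# chunk whose extent is drawn by SampleCategorical, bind that chunk to
# threadIdx.x, and stage the result in a "shared"-scope buffer. The design
# space therefore holds one sketch per subset of reduction blocks rewritten,
# including the untouched original.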
def test_gpu_softmax_mn():
@T.prim_func
def softmax_mn_0(
A: T.Buffer[(256, 256), "float32"],
T_softmax_norm: T.Buffer[(256, 256), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_exp"):
i0_2, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(A[i0_2, i1_1], T_softmax_maxelem[i0_2])
T.writes(T_softmax_exp[i0_2, i1_1])
T_softmax_exp[i0_2, i1_1] = T.exp(
A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_4, k = T.axis.remap("SR", [i0_3, i1])
T.reads(T_softmax_exp[i0_4, k])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k]
for i0_5, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
T.reads(T_softmax_exp[i0_6, i1_2], T_softmax_expsum[i0_6])
T.writes(T_softmax_norm[i0_6, i1_2])
T.block_attr({"axis": 1})
T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
@T.prim_func
def softmax_mn_1(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_1 = T.axis.spatial(256, ax0 + i0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem_shared[i0_1])
with T.init():
T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem_shared[i0_1] = T.max(
T_softmax_maxelem_shared[i0_1], A[i0_1, k]
)
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_exp"):
T.where(i1_0 * 512 + i1_1 < 256)
i0_2 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.reads(A[i0_2, i1], T_softmax_maxelem_shared[i0_2])
T.writes(T_softmax_exp[i0_2, i1])
T_softmax_exp[i0_2, i1] = T.exp(
A[i0_2, i1] - T_softmax_maxelem_shared[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_4, k = T.axis.remap("SR", [i0_3, i1])
T.reads(T_softmax_exp[i0_4, k])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k]
for i0_5, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
T.reads(T_softmax_exp[i0_6, i1_2], T_softmax_expsum[i0_6])
T.writes(T_softmax_norm[i0_6, i1_2])
T.block_attr({"axis": 1})
T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
@T.prim_func
def softmax_mn_2(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_exp"):
i0_2, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(A[i0_2, i1_1], T_softmax_maxelem[i0_2])
T.writes(T_softmax_exp[i0_2, i1_1])
T_softmax_exp[i0_2, i1_1] = T.exp(
A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 32):
for ax1_1 in T.thread_binding(8, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0_4 = T.axis.spatial(256, ax0 + i0_3)
k = T.axis.reduce(256, ax1_0 * 8 + ax1_1)
T.reads(T_softmax_exp[i0_4, k])
T.writes(T_softmax_expsum_shared[i0_4])
with T.init():
T_softmax_expsum_shared[i0_4] = T.float32(0)
T_softmax_expsum_shared[i0_4] = (
T_softmax_expsum_shared[i0_4] + T_softmax_exp[i0_4, k]
)
for i1_0 in T.serial(32):
for i1_1_1 in T.thread_binding(8, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0_5 = T.axis.spatial(256, i0_3)
i1 = T.axis.spatial(256, i1_0 * 8 + i1_1_1)
T.reads(T_softmax_exp[i0_5, i1], T_softmax_expsum_shared[i0_5])
T.writes(T_softmax_norm[i0_5, i1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_5, i1] = (
T_softmax_exp[i0_5, i1] / T_softmax_expsum_shared[i0_5]
)
@T.prim_func
def softmax_mn_3(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_1 = T.axis.spatial(256, ax0 + i0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem_shared[i0_1])
with T.init():
T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem_shared[i0_1] = T.max(
T_softmax_maxelem_shared[i0_1], A[i0_1, k]
)
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_exp"):
T.where(i1_0 * 512 + i1_1 < 256)
i0_2 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.reads(A[i0_2, i1], T_softmax_maxelem_shared[i0_2])
T.writes(T_softmax_exp[i0_2, i1])
T_softmax_exp[i0_2, i1] = T.exp(
A[i0_2, i1] - T_softmax_maxelem_shared[i0_2], dtype="float32"
)
for i0_3 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 32):
for ax1_1 in T.thread_binding(8, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0_4 = T.axis.spatial(256, ax0 + i0_3)
k = T.axis.reduce(256, ax1_0 * 8 + ax1_1)
T.reads(T_softmax_exp[i0_4, k])
T.writes(T_softmax_expsum_shared[i0_4])
with T.init():
T_softmax_expsum_shared[i0_4] = T.float32(0)
T_softmax_expsum_shared[i0_4] = (
T_softmax_expsum_shared[i0_4] + T_softmax_exp[i0_4, k]
)
for i1_0 in T.serial(32):
for i1_1 in T.thread_binding(8, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0_5 = T.axis.spatial(256, i0_3)
i1 = T.axis.spatial(256, i1_0 * 8 + i1_1)
T.reads(T_softmax_exp[i0_5, i1], T_softmax_expsum_shared[i0_5])
T.writes(T_softmax_norm[i0_5, i1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_5, i1] = (
T_softmax_exp[i0_5, i1] / T_softmax_expsum_shared[i0_5]
)
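# Each decision list pins the SampleCategorical samples (the candidate thread
# extents) needed to reproduce the corresponding sketch; sketch 0 applies no
# cross-thread rewrite, so its list is empty.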
decision_0 = []
decision_1 = [
("SampleCategorical", 7),
]
decision_2 = [
("SampleCategorical", 1),
]
decision_3 = [
("SampleCategorical", 1),
("SampleCategorical", 7),
]
mod = create_prim_func(te_workload.softmax_mn(n=256, m=256))
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3090", host="llvm"),
types=ms.schedule_rule.CrossThreadReduction,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[softmax_mn_0, softmax_mn_1, softmax_mn_2, softmax_mn_3],
expected_decisions=[decision_0, decision_1, decision_2, decision_3],
)
def test_gpu_softmax_mn_after_inline():
@T.prim_func
def softmax_mn_after_inline_0(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_2, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_2, k], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum[i0_2])
with T.init():
T_softmax_expsum[i0_2] = T.float32(0)
T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
T.reads(A[i0_4, i1_1], T_softmax_maxelem[i0_4], T_softmax_expsum[i0_4])
T.writes(T_softmax_norm[i0_4, i1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1] = (
T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum[i0_4]
)
@T.prim_func
def softmax_mn_after_inline_1(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1_0 in T.grid(256, 4):
for i1_1 in T.thread_binding(64, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, i1_0 * 64 + i1_1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_2, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_2, k], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum[i0_2])
with T.init():
T_softmax_expsum[i0_2] = T.float32(0)
T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
T.reads(A[i0_4, i1_1], T_softmax_maxelem[i0_4], T_softmax_expsum[i0_4])
T.writes(T_softmax_norm[i0_4, i1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1] = (
T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum[i0_4]
)
@T.prim_func
def softmax_mn_after_inline_2(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0_3 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_2 = T.axis.spatial(256, ax0 + i0_3)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_2, k], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum_shared[i0_2])
with T.init():
T_softmax_expsum_shared[i0_2] = T.float32(0)
T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
T.where(i1_0 * 512 + i1_1 < 256)
i0_4 = T.axis.spatial(256, i0_3)
i1_1_1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.reads(
A[i0_4, i1_1_1], T_softmax_maxelem[i0_4], T_softmax_expsum_shared[i0_4]
)
T.writes(T_softmax_norm[i0_4, i1_1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1_1] = (
T.exp(A[i0_4, i1_1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum_shared[i0_4]
)
@T.prim_func
def softmax_mn_after_inline_3(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0_3 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_1 = T.axis.spatial(256, ax0 + i0_3)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem_shared[i0_1])
with T.init():
T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem_shared[i0_1] = T.max(
T_softmax_maxelem_shared[i0_1], A[i0_1, k]
)
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum"): |
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_2 = T.axis.spatial(256, ax0 + i0_3)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_2, k], T_softmax_maxelem_shared[i0_2])
T.writes(T_softmax_expsum_shared[i0_2])
with T.init():
T_softmax_expsum_shared[i0_2] = T.float32(0)
T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
)
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
T.where(i1_0 * 512 + i1_1 < 256)
i0_4 = T.axis.spatial(256, i0_3)
i1_1_1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.reads(
A[i0_4, i1_1_1],
T_softmax_maxelem_shared[i0_4],
T_softmax_expsum_shared[i0_4],
)
T.writes(T_softmax_norm[i0_4, i1_1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1_1] = (
T.exp(A[i0_4, i1_1_1] - T_softmax_maxelem_shared[i0_4], dtype="float32")
/ T_softmax_expsum_shared[i0_4]
)
decision_0 = []
decision_1 = [
("SampleCategorical", 4),
]
decision_2 = [
("SampleCategorical", 7),
]
decision_3 = [
("SampleCategorical", 7),
("SampleCategorical", 0),
]
mod = Softmax_mn_after_inline
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3090", host="llvm"),
types=ms.schedule_rule.CrossThreadReduction,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[
softmax_mn_after_inline_0,
softmax_mn_after_inline_1,
softmax_mn_after_inline_2,
softmax_mn_after_inline_3,
],
expected_decisions=[decision_0, decision_1, decision_2, decision_3],
)
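# norm_bmn reduces the whole (1, 512, 512) tensor into one scalar, so the
# cross-thread sketch first fuses both reduction loops (512 * 512 = 262144
# = 1024 * 256) and then binds the inner 256-wide split to threadIdx.x.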
def test_gpu_batch_norm_bmn():
@T.prim_func
def batch_norm_bmn_0(A: T.Buffer[(1, 512, 512), "float32"], D: T.Buffer[1, "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([1], dtype="float32")
for i0, i1, i2 in T.grid(1, 512, 512):
with T.block("C"):
b, i, j = T.axis.remap("SRR", [i0, i1, i2])
T.reads(A[b, i, j])
T.writes(C[b])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + A[b, i, j] * A[b, i, j]
for i0 in T.serial(1):
with T.block("D"):
b = T.axis.spatial(1, i0)
T.reads(C[b])
T.writes(D[b])
D[b] = T.sqrt(C[b], dtype="float32")
@T.prim_func
def batch_norm_bmn_1(A: T.Buffer[(1, 512, 512), "float32"], D: T.Buffer[1, "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_shared = T.alloc_buffer([1], dtype="float32", scope="shared")
for i0_0 in T.serial(1):
for ax0, ax1_ax2_fused_0 in T.grid(1, 1024):
for ax1_ax2_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
with T.block("C"):
b = T.axis.spatial(1, ax0)
i = T.axis.reduce(512, (ax1_ax2_fused_0 * 256 + ax1_ax2_fused_1) // 512)
j = T.axis.reduce(512, (ax1_ax2_fused_0 * 256 + ax1_ax2_fused_1) % 512)
T.reads(A[b, i, j])
T.writes(C_shared[b])
with T.init():
C_shared[b] = T.float32(0)
C_shared[b] = C_shared[b] + A[b, i, j] * A[b, i, j]
for i0_1 in T.thread_binding(256, thread="threadIdx.x"):
with T.block("D"):
T.where(i0_0 * 256 + i0_1 < 1)
b = T.axis.spatial(1, i0_0 * 256 + i0_1)
T.reads(C_shared[b])
T.writes(D[b])
D[b] = T.sqrt(C_shared[b], dtype="float32")
decision_0 = []
decision_1 = [
("SampleCategorical", 6),
]
mod = create_prim_func(te_workload.norm_bmn(B=1, M=512, N=512))
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3090", host="llvm"),
types=ms.schedule_rule.CrossThreadReduction,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[batch_norm_bmn_0, batch_norm_bmn_1],
expected_decisions=[decision_0, decision_1],
)
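# argmax is a tuple reduction: a single block updates two outputs (argmax_v0,
# argmax_v1) through T.Select, and CrossThreadReduction has to keep both
# updates together inside the one thread-bound reduction block.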
@T.prim_func
def argmax(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1 in T.grid(128, 128):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_32(
idx: T.Buffer[(1, 32), "int32"],
val: T.Buffer[(1, 32), "float32"],
argmax_v0: T.Buffer[(1,), "int32"],
argmax_v1: T.Buffer[(1,), "float32"],
) -> None:
for i0, i1 in T.grid(1, 32):
with T.block("argmax"):
i = T.axis.spatial(1, i0)
k = T.axis.reduce(32, i1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
def test_gpu_argmax():
@T.prim_func
def argmax_0(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[128, "int32"],
argmax_v1: T.Buffer[128, "float32"],
) -> None:
for i0, i1 in T.grid(128, 128):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_1(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[128, "int32"],
argmax_v1: T.Buffer[128, "float32"],
) -> None:
for i0, i1_0 in T.grid(128, 2):
for i1_1 in T.thread_binding(64, thread="threadIdx.x"):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 64 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
decision_0 = []
decision_1 = [
("SampleCategorical", 4),
]
mod = argmax
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3090", host="llvm"),
types=ms.schedule_rule.CrossThreadReduction,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[argmax_0, argmax_1],
expected_decisions=[decision_0, decision_1],
)
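# With only 32 reduction elements and a sampled thread extent of 64, the
# thread-bound sketch must guard the body with T.where(i1_0 * 64 + i1_1 < 32)
# so that out-of-range threads skip the update.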
def test_gpu_argmax_32():
@T.prim_func
def argmax_0(
idx: T.Buffer[(1, 32), "int32"],
val: T.Buffer[(1, 32), "float32"],
argmax_v0: T.Buffer[(1,), "int32"],
argmax_v1: T.Buffer[(1,), "float32"],
) -> None:
for i0, i1 in T.grid(1, 32):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_1(
idx: T.Buffer[(1, 32), "int32"],
val: T.Buffer[(1, 32), "float32"],
argmax_v0: T.Buffer[(1,), "int32"],
argmax_v1: T.Buffer[(1,), "float32"],
) -> None:
for i0, i1_0 in T.grid(1, 1):
for i1_1 in T.thread_binding(64, thread="threadIdx.x"):
with T.block("argmax"):
i = T.axis.spatial(1, i0)
k = T.axis.reduce(32, i1_0 * 64 + i1_1)
T.where(i1_0 * 64 + i1_1 < 32)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
decision_0 = []
decision_1 = [
("SampleCategorical", 4),
]
mod = argmax_32
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3090", host="llvm"),
types=ms.schedule_rule.CrossThreadReduction,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[argmax_0, argmax_1],
expected_decisions=[decision_0, decision_1],
)
if __name__ == "__main__":
test_gpu_softmax_mn()
test_gpu_softmax_mn_after_inline()
test_gpu_batch_norm_bmn()
test_gpu_argmax()
test_gpu_argmax_32()
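# ---- Tests for the MultiLevelTiling schedule rule ----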
from tvm import meta_schedule as ms
from tvm import target, te
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import check_sketches, generate_design_space
from tvm.script import tir as T
from tvm.target import Target
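# The tests below exercise ms.schedule_rule.MultiLevelTiling. On CPU the rule
# tiles each compute block with structure "SSRSRS" (tile sizes are drawn by
# SamplePerfectTile) and optionally adds a "C_global" cache_write stage; the
# three matmul sketches differ only in where that write-back is anchored
# (inner spatial tile, outer spatial tile, or no cache at all).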
def test_cpu_matmul():
@T.prim_func
def cpu_matmul_0(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_global = T.alloc_buffer([512, 512], dtype="float32")
for i0_0, i1_0, i0_1, i1_1 in T.grid(1, 8, 8, 1):
for i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(16, 2, 8, 32, 32, 8):
with T.block("C"):
i = T.axis.spatial(512, i0_0 * 512 + i0_1 * 64 + i0_2 * 32 + i0_3)
j = T.axis.spatial(512, i1_0 * 64 + i1_1 * 64 + i1_2 * 8 + i1_3)
k = T.axis.reduce(512, i2_0 * 32 + i2_1)
T.reads(A[i, k], B[k, j])
T.writes(C_global[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
with T.init():
C_global[i, j] = T.float32(0)
C_global[i, j] = C_global[i, j] + A[i, k] * B[k, j]
for ax0, ax1 in T.grid(64, 64):
with T.block("C_global"):
v0 = T.axis.spatial(512, i0_1 * 64 + ax0)
v1 = T.axis.spatial(512, i1_0 * 64 + ax1)
T.reads(C_global[v0, v1])
T.writes(C[v0, v1])
C[v0, v1] = C_global[v0, v1]
@T.prim_func
def cpu_matmul_1(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_global = T.alloc_buffer([512, 512], dtype="float32")
for i0_0, i1_0 in T.grid(1, 8):
for i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(8, 1, 16, 2, 8, 32, 32, 8):
with T.block("C"):
i = T.axis.spatial(512, i0_0 * 512 + i0_1 * 64 + i0_2 * 32 + i0_3)
j = T.axis.spatial(512, i1_0 * 64 + i1_1 * 64 + i1_2 * 8 + i1_3)
k = T.axis.reduce(512, i2_0 * 32 + i2_1)
T.reads(A[i, k], B[k, j])
T.writes(C_global[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
with T.init():
C_global[i, j] = T.float32(0)
C_global[i, j] = C_global[i, j] + A[i, k] * B[k, j]
for ax0, ax1 in T.grid(512, 64):
with T.block("C_global"):
v0 = T.axis.spatial(512, ax0)
v1 = T.axis.spatial(512, i1_0 * 64 + ax1)
T.reads(C_global[v0, v1])
T.writes(C[v0, v1])
C[v0, v1] = C_global[v0, v1]
@T.prim_func
def cpu_matmul_2(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0_0, i1_0, i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(
1, 8, 8, 1, 16, 2, 8, 32, 32, 8
):
with T.block("C"):
i = T.axis.spatial(512, i0_0 * 512 + i0_1 * 64 + i0_2 * 32 + i0_3)
j = T.axis.spatial(512, i1_0 * 64 + i1_1 * 64 + i1_2 * 8 + i1_3)
k = T.axis.reduce(512, i2_0 * 32 + i2_1)
T.reads(A[i, k], B[k, j])
T.writes(C[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
decision_0 = [
("SamplePerfectTile", [1, 8, 2, 32]),
("SamplePerfectTile", [8, 1, 8, 8]),
("SamplePerfectTile", [16, 32]),
]
decision_1 = [
("SamplePerfectTile", [1, 8, 2, 32]),
("SamplePerfectTile", [8, 1, |
8, 8]),
("SamplePerfectTile", [16, 32]),
]
decision_2 = [
("SamplePerfectTile", [1, 8, 2, 32]),
("SamplePerfectTile", [8, 1, 8, 8]),
("SamplePerfectTile", [16, 32]),
]
mod = te.create_prim_func(te_workload.matmul(512, 512, 512))
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target("llvm"),
types=ms.schedule_rule.MultiLevelTiling,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[cpu_matmul_0, cpu_matmul_1, cpu_matmul_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_matmul_relu():
@T.prim_func
def cpu_matmul_relu_0(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
compute: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([512, 512], dtype="float32")
for i0_0, i1_0, i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(
256, 4, 1, 4, 64, 1, 32, 8, 2, 1
):
with T.block("C"):
i = T.axis.spatial(512, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
j = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + i1_2 + i1_3)
k = T.axis.reduce(512, i2_0 * 8 + i2_1)
T.reads(A[i, k], B[k, j])
T.writes(C[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
for i0, i1 in T.grid(512, 512):
with T.block("compute"):
i0_4, i1_4 = T.axis.remap("SS", [i0, i1])
T.reads(C[i0_4, i1_4])
T.writes(compute[i0_4, i1_4])
compute[i0_4, i1_4] = T.max(C[i0_4, i1_4], T.float32(0))
@T.prim_func
def cpu_matmul_relu_1(
A: T.Buffer[(512, 512), "floa |
t32"],
B: T.Buffer[(512, 512), "float32"],
compute: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([512, 512], dtype="float32")
for i0_0, i1_0, i0_1, i1_1 in T.grid(256, 4, 1, 4):
for i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(64, 1, 32, 8, 2, 1):
with T.block("C"):
i = T.axis.spatial(512, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
j = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + i1_2 + i1_3)
k = T.axis.reduce(512, i2_0 * 8 + i2_1)
T.reads(A[i, k], B[k, j])
T.writes(C[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
for ax0, ax1 in T.grid(2, 32):
with T.block("compute"):
i0 = T.axis.spatial(512, i0_0 * 2 + ax0)
i1 = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + ax1)
T.reads(C[i0, i1])
T.writes(compute[i0, i1])
compute[i0, i1] = T.max(C[i0, i1], T.float32(0))
@T.prim_func
def cpu_matmul_relu_2(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
compute: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([512, 512], dtype="float32")
for i0_0, i1_0 in T.grid(256, 4):
for i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(1, 4, 64, 1, 32, 8, 2, 1):
with T.block("C"):
i = T.axis.spatial(512, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
j = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + i1_2 + i1_3)
k = T.axis.reduce(512, i2_0 * 8 + i2_1)
T.reads(A[i, k], B[k, j])
T.writes(C[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
for ax0, ax1 in T.grid(2, 128):
with T.block("compute"):
i0 = T.axis.spatial(512, i0_0 * 2 + ax0)
i1 = T.axis.spatial(512, i1_0 * 128 + ax1)
T.reads(C[i0, i1])
T.writes(compute[i0, i1])
compute[i0, i1] = T.max(C[i0, i1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [256, 1, 1, 2]),
("SamplePerfectTile", [4, 4, 32, 1]),
("SamplePerfectTile", [64, 8]),
]
decision_1 = [
("SamplePerfectTile", [256, 1, 1, 2]),
("SamplePerfectTile", [4, 4, 32, 1]),
("SamplePerfectTile", [64, 8]),
]
decision_2 = [
("SamplePerfectTile", [256, 1, 1, 2]),
("SamplePerfectTile", [4, 4, 32, 1]),
("SamplePerfectTile", [64, 8]),
]
mod = te.create_prim_func(te_workload.matmul_relu(512, 512, 512))
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target("llvm"),
types=ms.schedule_rule.MultiLevelTiling,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[cpu_matmul_relu_0, cpu_matmul_relu_1, cpu_matmul_relu_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
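# On CUDA the structure becomes "SSSRRSRS": the three outer spatial tile
# levels are bound to blockIdx.x / vthread.x / threadIdx.x, both operands are
# staged in "shared" scope by cooperative fetch (the SampleCategorical
# decisions choose the fetch vectorization), and the accumulator lives in a
# thread-"local" buffer that is copied out after the reduction.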
def test_cuda_matmul():
@T.prim_func
def cuda_matmul_0(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(128, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(8, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(4, thread="threadIdx.x"):
for i2_0 in T.serial(128):
for ax0_ax1_fused in T.serial(256):
with T.block("A_shared"):
v0 = T.axis.spatial(512, i0_0_i1_0_fused // 16 * 64 + ax0_ax1_fused // 4)
v1 = T.axis.spatial(512, i2_0 * 4 + ax0_ax1_fused % 4)
T.reads(A[v0, v1])
T.writes(A_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 2})
A_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused in T.serial(128):
with T.block("B_shared"):
v0 = T.axis.spatial(512, i2_0 * 4 + ax0_ax1_fused // 32)
v1 = T.axis.spatial(
512, i0_0_i1_0_fused % 16 * 32 + ax0_ax1_fused % 32
)
T.reads(B[v0, v1])
T.writes(B_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
B_shared[v0, v1] = B[v0, v1]
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(2, 1, 1, 2, 16, 4):
with T.block("C"):
i = T.axis.spatial(
512,
i0_0_i1_0_fused // 16 * 64
+ i0_1_i1_1_fused // 2 * 16
+ i0_3 * 16
+ i0_4,
)
j = T.axis.spatial(
512,
i0_0_i1_0_fused % 16 * 32
+ i0_1_i1_1_fused % 2 * 16
+ i0_2_i1_2_fused * 4
+ i1_3 * 4
+ i1_4,
)
k = T.axis.reduce(512, i2_0 * 4 + i2_1 * 2 + i2_2)
T.reads(A_shared[i, k], B_shared[k, j])
T.writes(C_local[i, j])
T.block_attr(
{
"meta_schedule.thread_extent_high_inclusive": 1024,
"meta_schedule.thread_extent_low_inclusive": 32,
"meta_schedule.tiling_structure": "SSSRRSRS",
}
)
with T.init():
C_local[i, j] = T.float32(0)
C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
for ax0, ax1 in T.grid(16, 4):
with T.block("C_local"):
v0 = T.axis.spatial(512, i0_0_i1_0_fused // 16 * 64 + i0_1_i1_1_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(
512,
i0_0_i1_0_fused % 16 * 32
+ i0_1_i1_1_fused % 2 * 16
+ i0_2_i1_2_fused * 4
+ ax1,
)
T.reads(C_local[v0, v1])
T.writes(C[v0, v1])
C[v0, v1] = C_local[v0, v1]
decision_0 = [
("Sampl |
ePerfectTile", [8, 4, 1, 1, 16]),
("SamplePerfectTile", [16, 2, 4, 1, 4]),
("SamplePerfectTile", [128, 2, 2]),
("SampleCategorical", 1),
("SampleCategorical", 0),
]
mod = te.create_prim_func(te_workload.matmul(512, 512, 512))
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080"),
types=ms.schedule_rule.MultiLevelTiling,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[cuda_matmul_0],
expected_decisions=[decision_0],
)
def test_cuda_matmul_relu():
@T.prim_func
def cuda_matmul_relu_0(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
compute: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([512, 512], dtype="float32")
C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(64, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(64, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(8, thread="threadIdx.x"):
for i2_0 in T.serial(8):
for ax0_ax1_fused in T.serial(4096):
with T.block("A_shared"):
v0 = T.axis.spatial(512, i0_0_i1_0_fused // 8 * 64 + ax0_ax1_fused // 64)
v1 = T.axis.spatial(512, i2_0 * 64 + ax0_ax1_fused % 64)
T.reads(A[v0, v1])
T.writes(A_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 2}) |
A_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused in T.serial(4096):
with T.block("B_shared"):
v0 = T.axis.spatial(512, i2_0 * 64 + ax0_ax1_fused // 64)
v1 = T.axis.spatial(
512, i0_0_i1_0_fused % 8 * 64 + ax0_ax1_fused % 64
)
T.reads(B[v0, v1])
T.writes(B_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 4})
B_shared[v0, v1] = B[v0, v1]
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(8, 2, 1, 8, 2, 2):
with T.block("C"):
i = T.axis.spatial(
512,
i0_0_i1_0_fused // 8 * 64
+ i0_1_i1_1_fused // 8 * 8
+ i0_2_i1_2_fused // 4 * 4
+ i0_3 * 2
+ i0_4,
)
j = T.axis.spatial(
512,
i0_0_i1_0_fused % 8 * 64
+ i0_1_i1_1_fused % 8 * 8
+ i0_2_i1_2_fused % 4 * 2
+ i1_3 * 2
+ i1_4,
)
k = T.axis.reduce(512, i2_0 * 64 + i2_1 * 8 + i2_2)
T.reads(A_shared[i, k], B_shared[k, j])
T.writes(C_local[i, j])
T.block_attr(
{
"meta_schedule.thread_extent_high_inclusive": 1024,
"meta_schedule.thread_extent_ |
low_inclusive": 32,
"meta_schedule.tiling_structure": "SSSRRSRS",
}
)
with T.init():
C_local[i, j] = T.float32(0)
C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
for ax0, ax1 in T.grid(4, 2):
with T.block("C_local"):
v0 = T.axis.spatial(
512,
i0_0_i1_0_fused // 8 * 64
+ i0_1_i1_1_fused // 8 * 8
+ i0_2_i1_2_fused // 4 * 4
+ ax0,
)
v1 = T.axis.spatial(
512,
i0_0_i1_0_fused % 8 * 64
+ i0_1_i1_1_fused % 8 * 8
+ i0_2_i1_2_fused % 4 * 2
+ ax1,
)
T.reads(C_local[v0, v1])
T.writes(C[v0, v1])
C[v0, v1] = C_local[v0, v1]
for i0, i1 in T.grid(512, 512):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(C[i0_1, i1_1])
T.writes(compute[i0_1, i1_1])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [8, 8, 2, 2, 2]),
("SamplePerfectTile", [8, 8, 4, 1, 2]),
("SamplePerfectTile", [8, 8, 8]),
("SampleCategorical", 1),
("SampleCategorical", 3),
]
mod = te.create_prim_func(te_workload.matmul_relu(512, 512, 512))
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080"),
types=ms.schedule_rule.MultiLevelTiling,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[cuda_matmul_relu_0],
expected_decisions=[decision_0],
)
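# A reduction whose remaining spatial iterators are all trivial gives the
# tiling rule nothing to bind, so it must leave the workload alone: the single
# returned schedule has an empty (simplified) trace.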
def test_cuda_sum_with_trivial_block_iter():
@T.prim_func
def sum_with_trivial_block_iter(
A: T.Buffer[(1, 64, 768), "float32"],
B: T.Buffer[(1, 64, 1), "float32"],
) -> None:
for i0, i1, i2, i3 in T.grid(1, 64, 1, 768):
with T.block("sum"):
ax0, ax1, ax2, k2 = T.axis.remap("SSSR", [i0, i1, i2, i3])
T.reads(A[ax0, ax1, k2])
T.writes(B[ax0, ax1, ax2])
with T.init():
B[ax0, ax1, ax2] = T.float32(0)
B[ax0, ax1, ax2] = B[ax0, ax1, ax2] + A[ax0, ax1, k2]
mod = sum_with_trivial_block_iter
(sch,) = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080"),
types=ms.schedule_rule.MultiLevelTiling,
)
assert not sch.trace.simplified(remove_postproc=True).insts
def test_multi_level_tiling_hexagon():
@T.prim_func
def cpu_conv2d_nhwc(
inputs: T.Buffer[(1, 56, 56, 64), "float16"],
weight: T.Buffer[(3, 3, 64, 64), "float16"],
conv2d_nhwc: T.Buffer[(1, 56, 56, 64), "float16"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
PadInput = T.alloc_buffer([1, 58, 58, 64], dtype="float16")
for i0, i1, i2, i3 in T.grid(1, 58, 58, 64):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
1 <= i1_1 and i1_1 < 57 and 1 <= i2_1 and i2_1 < 57,
inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1],
T.float16(0),
dtype="float16", |
)
for (
i0_0,
i1_0,
i2_0,
i3_0,
i4_0,
i5_0,
i6_0,
i0_1_1,
i1_1_1,
i2_1_1,
i3_1_1,
i4_1,
i5_1,
i6_1,
i0_2,
i1_2,
i2_2,
i3_2,
) in T.grid(1, 1, 2, 1, 3, 3, 16, 1, 14, 2, 1, 1, 1, 4, 1, 4, 14, 64):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, i0_1_1 + i0_2 + i0_0)
h = T.axis.spatial(56, i1_0 * 56 + i1_1_1 * 4 + i1_2)
w = T.axis.spatial(56, i2_0 * 28 + i2_1_1 * 14 + i2_2)
co = T.axis.spatial(64, i3_0 * 64 + i3_1_1 * 64 + i3_2)
rh = T.axis.reduce(3, i4_1 + i4_0)
rw = T.axis.reduce(3, i5_0 + i5_1)
rc = T.axis.reduce(64, i6_0 * 4 + i6_1)
T.reads(PadInput[n, h + rh, w + rw, co // 64 * 64 + rc], weight[rh, rw, rc, co])
T.writes(conv2d_nhwc[n, h, w, co])
T.block_attr({"meta_schedule.tiling_structure": "SRSRS"})
with T.init():
conv2d_nhwc[n, h, w, co] = T.float16(0)
conv2d_nhwc[n, h, w, co] = (
conv2d_nhwc[n, h, w, co]
+ PadInput[n, h + rh, w + rw, co // 64 * 64 + rc] * weight[rh, rw, rc, co]
)
target_hexagon = target.hexagon("v69", num_cores=4)
I = 64
O = 64
H = 56
W = 56
mod = te.create_prim_func(
te_workload.conv2d_nhwc(1, H, W, I, O, 3, 1, 1, 1, in_dtype="float16", out_dtype="float16")
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target(target_hexagon, host=target_hexagon),
types=None,
sch_rules=[
ms.schedule_rule.MultiLevelTilingWideVector(
structure="SRSRS",
vector_length_in_bits=1024,
max_innermost_factor=64,
reuse_read=None,
reuse_write=None,
)
],
)
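# MultiLevelTilingWideVector targets 1024-bit HVX vectors; at float16 that is
# 64 lanes, so the innermost channel loop (i3_2, extent 64) stays whole as the
# vectorized axis and has no SamplePerfectTile entry below.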
decision_0 = [
("SamplePerfectTile", [1, 1, 1]),
("SamplePerfectTile", [1, 14, 4]),
("SamplePerfectTile", [2, 2, 14]),
("SamplePerfectTile", [3, 1]),
("SamplePerfectTile", [3, 1]),
("SamplePerfectTile", [16, 4]),
]
check_sketches(
mod,
sketches=actual,
expected_mods=[cpu_conv2d_nhwc],
expected_decisions=[decision_0],
)
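# cache_read here must attach the staged shared-memory copy to the matmul
# consumer only: the residual "T_add" block keeps reading the original A
# buffer, which is exactly what the script-fragment assertion below checks.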
def test_cache_read_specify_consumer():
A, B, C = te_workload.matmul(512, 512, 512)
mod = te.create_prim_func([A, B, C + A])
space = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3080"),
types=ms.schedule_rule.MultiLevelTiling,
)
residual_block = """
for i0, i1 in T.grid(512, 512):
with T.block("T_add"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(C[ax0, ax1], A[ax0, ax1])
T.writes(T_add[ax0, ax1])
T_add[ax0, ax1] = C[ax0, ax1] + A[ax0, ax1]
"""
assert residual_block in space[0].mod.script()
if __name__ == "__main__":
test_cpu_matmul()
test_cpu_matmul_relu()
test_cuda_matmul()
test_cuda_matmul_relu()
test_cuda_sum_with_trivial_block_iter()
test_multi_level_tiling_hexagon()
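# ---- Tests for MultiLevelTiling with tensor intrinsics (VNNI / DP4A) ----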
from tvm import meta_schedule as ms
from tvm import te
from tvm.ir import assert_structural_equal
from tvm.meta_schedule.testing.space_generation import check_sketches, generate_design_space
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir.tensor_intrin.arm_cpu import DP4A_INTRIN
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN
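# The test below exercises tensorized multi-level tiling: the inner 16x4 int8
# dot product of the NCHWc conv2d is blockized into a separate inner block and
# tagged with "meta_schedule.auto_tensorize": "dot_16x4_vnni", so that a later
# postprocessor can replace it with the x86 VNNI intrinsic imported above as
# VNNI_INTRIN.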
def test_vnni_conv2d_nchwc():
@T.prim_func
def conv2d_nchwc(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
with T.block("conv2d_NCHWc_int8"):
(
n,
oc_chunk,
oh,
ow,
oc_block,
kh,
kw,
ic_outer,
ic_f_inner,
ic_s_inner,
) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
@T.prim_func
def vnni_conv2d_nchwc_0(placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"], placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"], conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalia |
s": True})
conv2d_NCHWc_int8_global = T.alloc_buffer([1, 16, 56, 56, 16], dtype="int32")
for i0_0, i1_0, i2_0, i3_0, i4_0_0, i0_1, i1_1, i2_1, i3_1, i4_0_1 in T.grid(1, 8, 28, 56, 1, 1, 2, 1, 1, 1):
for i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 1, 1, 4, 1, 1, 1, 2, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_0 * 2 + i1_1 + i1_2 + i1_3)
oh = T.axis.spatial(56, i2_0 * 2 + i2_1 * 2 + i2_2 + i2_3)
ow = T.axis.spatial(56, i3_3 + i3_0 + i3_1 + i3_2)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer = T.axis.reduce(4, i7_0 * 4 + i7_1)
ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0 : 16, 0 : 4])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, 0 : 16])
T.block_attr({"meta_schedule.auto_tensorize":"dot_16x4_vnni"})
with T.init():
for i4_1 in T.serial(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init])
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block_i, ic_s_inner_i = T.axis.remap("SR", [i4_1, i9_1])
T.reads(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i], placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] = conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] + T.cast(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], "int32") * T.cast(placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i], "int32")
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 1, 2, 1, 16):
with T.block("conv2d_NCHWc_int8_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(16, i1_0 * 2 + i1_1 + ax1)
v2 = T.axis.spatial(56, i2_0 * 2 + ax2)
v3 = T.axis.spatial(56, i3_0 + ax3)
v4 = T.axis.spatial(16, ax4)
T.reads(conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4])
T.writes(conv2d_NCHWc_int8[v0, v1, v2, v3, v4])
conv2d_NCHWc_int8[v0, v1, v2, v3, v4] = conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4]
@T.prim_func
def vnni_conv2d_nchwc_1(placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"], placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"], conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
conv2d_NCHWc_int8_global = T.alloc_buffer([1, 16, 56, 56, 16], dtype="int32")
for i0_0, i1_0, i2_0, i3_0, i4_0_0 in T.grid(1, 8, 28, 56, 1):
for i0_1, i1_1, i2_1, i3_1, i4_0_1, i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 2, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_0 * 2 + i1_1 + i1_2 + i1_3)
oh = T.axis.spatial(56, i2_0 * 2 + i2_1 * 2 + i2_2 + i2_3)
ow = T.axis.spatial(56, i3_3 + i3_0 + i3_1 + i3_2)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer = T.axis.reduce(4, i7_0 * 4 + i7_1)
ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0 : 16, 0 : 4])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, 0 : 16])
T.block_attr({"meta_schedule.auto_tensorize":"dot_16x4_vnni"})
with T.init():
for i4_1 in T.serial(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init])
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block_i, ic_s_inner_i = T.axis.remap("SR", [i4_1, i9_1])
T.reads(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i], placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] = conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] + T.cast(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], "int32") * T.cast(placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i], "int32")
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 2, 2, 1, 16):
with T.block("conv2d_NCHWc_int8_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(16, i1_0 * 2 + ax1)
v2 = T.axis.spatial(56, i2_0 * 2 + ax2)
v3 = T.axis.spatial(56, i3_0 + ax3)
v4 = T.axis.spatial(16, ax4)
T.reads(conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4])
T.writes(conv2d_NCHWc_int8[v0, v1, v2, v3, v4])
conv2d_NCHWc_int8[v0, v1, v2, v3, v4] = conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4]
@T.prim_func
def vnni_conv2d_nchwc_2(placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"], placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"], conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0_0, i1_0, i2_0, i3_0, i4_0_0, i0_1, i1_1, i2_1, i3_1, i4_0_1, i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 8, 28, 56, 1, 1, 2, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_0 * 2 + i1_1 + i1_2 + i1_3)
oh = T.axis.spatial(56, i2_0 * 2 + i2_1 * 2 + i2_2 + i2_3)
ow = T.axis.spatial(56, i3_3 + i3_0 + i3_1 + i3_2)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer = T.axis.reduce(4, i7_0 * 4 + i7_1)
ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0 : 16, 0 : 4])
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0 : 16])
T.block_attr({"meta_schedule.auto_tensorize":"dot_16x4_vnni"})
with T.init():
for i4_1 in T.serial(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i_init])
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block_i, ic_s_inner_i = T.axis.remap("SR", [i4_1, i9_1])
T.reads(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i], placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i] = conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i] + T.cast(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], "int32") * T.cast(placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i], "int32")
decision_0 = [ |
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1]),
("SamplePerfectTile", [28, 1, 2, 1]),
("SamplePerfectTile", [56, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 4]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 1]),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1]),
("SamplePerfectTile", [28, 1, 2, 1]),
("SamplePerfectTile", [56, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 4]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 1]),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1]),
("SamplePerfectTile", [28, 1, 2, 1]),
("SamplePerfectTile", [56, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 4]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 1]),
]
mod = conv2d_nchwc
target = Target("llvm -mcpu=cascadelake -num-cores=4")
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target(target),
types=None,
sch_rules=[
ms.schedule_rule.MultiLevelTilingWithIntrin(
VNNI_INTRIN,
structure="SSRSRS",
tile_binds=None,
max_innermost_factor=64,
vector_load_lens=None,
reuse_read=None,
reuse_write=ms.schedule_rule.ReuseType(req="may", levels=[1, 2], scope="global"),
),
],
)
check_sketches(
mod,
sketches=actual,
expected_mods=[vnni_conv2d_nchwc_0, vnni_conv2d_nchwc_1, vnni_conv2d_nchwc_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
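# The three sketches differ only in where ReuseType(req="may", levels=[1, 2])
# places the write cache: vnni_conv2d_nchwc_0 copies conv2d_NCHWc_int8_global
# back at the inner tiling level, vnni_conv2d_nchwc_1 at the outer level, and
# vnni_conv2d_nchwc_2 drops the cache and accumulates straight into
# conv2d_NCHWc_int8; the sampled tile decisions are identical across all three.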
def _check_dp4a_dense(m, n, k, in_dtype, out_dtype, expected_mods, expected_decisions):
def _dense(m, n, k, in_dtype, out_dtype):
X = te.placeholder((m, k), name="X", dtype=in_dtype)
W = te.placeholder((n, k), name="W", dtype=in_dtype)
ak = te.reduce_axis((0, k), name="k")
matmul = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype(out_dtype) * W[j, ak].astype(out_dtype),
axis=ak,
),
name="compute",
)
return te.create_prim_func([X, W, matmul])
mod = _dense(m, n, k, in_dtype, out_dtype)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("cuda"),
types=None,
sch_rules=[
ms.schedule_rule.MultiLevelTilingWithIntrin(
DP4A_INTRIN,
structure="SSSRRSRS",
tile_binds=["blockIdx.x", "vthread.x", "threadIdx.x"],
max_innermost_factor=64,
vector_load_lens=[1, 2, 3, 4],
reuse_read=ms.schedule_rule.ReuseType(req="must", levels=[4], scope="shared"),
reuse_write=ms.schedule_rule.ReuseType(req="must", levels=[3], scope="local"),
)
],
)
if expected_mods is None:
assert expected_decisions is None
assert len(actual) == 1
assert_structural_equal(mod, actual[0].mod["main"])
else:
check_sketches(mod, actual, expected_mods, expected_decisions)
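# When expected_mods is None, the helper asserts the fallthrough behaviour:
# the rule leaves exactly one candidate in the design space and that candidate
# is structurally equal to the unscheduled module, i.e. dp4a was not applied.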
def test_dp4a_dense():
@T.prim_func
def dp4a_dense_0(
X: T.Buffer[(128, 128), "int8"],
W: T.Buffer[(128, 128), "int8"],
compute: T.Buffer[(128, 128), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
compute_local = T.alloc_buffer([128, 128], dtype="int32", scope="local")
X_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
W_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(1, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(512, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(2, thread="threadIdx.x"):
for i2_0_0 in T.serial(1):
for ax0_ax1_fused in T.serial(16384):
with T.block("X_shared"):
v0 = T.axis.spatial(128, ax0_ax1_fused // 128)
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused in T.serial(16384):
with T.block("W_shared"):
v0 = T.axis.spatial(128, ax0_ax1_fused // 128)
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(W[v0, v1])
T.writes(W_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
W_shared[v0, v1] = W[v0, v1]
for i2_0_1, i0_3, i1_3, i2_0_2, i0_4, i1_4 in T.grid(1, 2, 4, 32, 2, 1):
with T.block("compute_o"):
i = T.axis.spatial(
128,
i0_1_i1_1_fused // 32 * 8
+ i0_2_i1_2_fused * 4
+ i0_3 * 2
+ i0_4,
)
j = T.axis.spatial(128, i1_4 + i0_1_i1_1_fused % 32 * 4 + i1_3) |
k_o = T.axis.reduce(32, i2_0_0 * 32 + i2_0_1 * 32 + i2_0_2)
T.reads(
X_shared[i, k_o * 4 : k_o * 4 + 4],
W_shared[j, k_o * 4 : k_o * 4 + 4],
)
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.auto_tensorize": "dp4a"})
with T.init():
with T.block("compute_init"):
T.reads()
T.writes(compute_local[i, j])
compute_local[i, j] = 0
for i2_1 in T.serial(4):
with T.block("compute"):
k_i = T.axis.reduce(4, i2_1)
T.reads(
compute_local[i, j],
X_shared[i, k_o * 4 + k_i],
W_shared[j, k_o * 4 + k_i],
)
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"})
compute_local[i, j] = compute_local[i, j] + T.cast(
X_shared[i, k_o * 4 + k_i], "int32"
) * T.cast(W_shared[j, k_o * 4 + k_i], "int32")
for ax0, ax1 in T.grid(4, 4):
with T.block("compute_local"):
v0 = T.axis.spatial(
128, i0_1_i1_1_fused // 32 * 8 + i0_2_i1_2_fused * 4 + ax0
)
v1 = T.axis.spatial(128, i0_1_i1_1_fused % 32 * 4 + ax1)
T.reads(compute_local[v0, v1]) |
T.writes(compute[v0, v1])
compute[v0, v1] = compute_local[v0, v1]
decision_0 = [
("SamplePerfectTile", [1, 16, 2, 2, 2]),
("SamplePerfectTile", [1, 32, 1, 4, 1]),
("SamplePerfectTile", [1, 1, 32]),
("SampleCategorical", 0),
("SampleCategorical", 0),
]
_check_dp4a_dense(
m=128,
n=128,
k=128,
in_dtype="int8",
out_dtype="int32",
expected_mods=[dp4a_dense_0],
expected_decisions=[decision_0],
)
def test_dp4a_dense_no_tensorize_1():
_check_dp4a_dense(
m=128,
n=128,
k=128,
in_dtype="float32",
out_dtype="float32",
expected_mods=None,
expected_decisions=None,
)
def test_dp4a_dense_no_tensorize_2():
_check_dp4a_dense(
m=127,
n=127,
k=127,
in_dtype="int8",
out_dtype="int32",
expected_mods=None,
expected_decisions=None,
)
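# Neither fallthrough case above should tensorize: float32 operands cannot
# match the int8 dp4a intrinsic, and with m = n = k = 127 the 4-element dp4a
# reduction (and the perfect-tile sampler) presumably has no valid split, so
# the design space degenerates to the original module in both tests.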
if __name__ == "__main__":
test_vnni_conv2d_nchwc()
test_dp4a_dense()
test_dp4a_dense_no_tensorize_1()
test_dp4a_dense_no_tensorize_2() |
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import te
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
get_rules,
)
from tvm.script import tir as T
from tvm.tir.tensor_intrin.cuda import get_wmma_intrin_group
def multi_level_tiling_tensor_core(
*,
write_reuse_scope="shared",
in_dtype="float16",
out_dtype="float32",
trans_b=False,
use_software_pipeline=False,
) -> ms.schedule_rule.ScheduleRule:
assert write_reuse_scope in ["shared", "global"]
if not isinstance(in_dtype, list):
in_dtype = [in_dtype]
if not isinstance(out_dtype, list):
out_dtype = [out_dtype]
if not isinstance(trans_b, list):
trans_b = [trans_b]
return ms.schedule_rule.MultiLevelTilingTensorCore(
intrin_groups=[
get_wmma_intrin_group(write_reuse_scope, _in_dtype, _out_dtype, _trans_b)
for _in_dtype in in_dtype
for _out_dtype in out_dtype
for _trans_b in trans_b
],
structure="SSSRRSRS",
tile_binds=["blockIdx.y", "blockIdx.x", "threadIdx.y"],
max_innermost_factor=4,
vector_load_lens=[1, 2, 3, 4, 8, 16],
reuse_read=ms.schedule_rule.ReuseType(
req="must",
levels=[4],
scope="shared",
),
reuse_write=ms.schedule_rule.ReuseType(
req="must" if write_reuse_scope == "shared" else "no",
levels=[2],
scope=write_reuse_scope,
),
use_software_pipeline=use_software_pipeline,
)
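# intrin_groups is the cartesian product of the in_dtype / out_dtype / trans_b
# options, so the rule carries one wmma intrinsic group per combination and
# picks whichever matches the workload. A rough usage sketch (illustrative
# only, not part of the tests):
#
#   rule = multi_level_tiling_tensor_core(
#       in_dtype="float16",
#       out_dtype=["float16", "float32"],
#   )
#   # -> two intrin groups, differing only in the accumulator dtype.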
def test_matmul_relu():
@T.prim_func
def matmul_relu_0(A: T.Buffer[(128, 128), "float16"], B: T.Buffer[(128, 128), "float16"], compute: T.Buffer[(128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
A_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
B_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
A_reindex_shared_wmma_matrix_a = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_a")
B_reindex_shared_wmma_matrix_b = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_b")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(8, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(2, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(2, thread="threadIdx.y"):
for ax2_0_0 in T.serial(1):
for ax0_ax1_fused in T.serial(4096):
with T.block("A_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused // 2 * 32 + ax0_ax1_fused // 128)
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(A[v0, v1])
T.writes(A_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":8})
A_reindex_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused in T.serial(4096):
with T.block("B_reindex_shared"):
v0 = T.axis.spatial(128, ax0_ax1_fused // 32)
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused % 2 * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused % 32)
T.reads(B[v0, v1])
T.writes(B_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":1})
B_reindex_shared[v0, v1] = B[v0, v1]
for ax2_0_1 in T.serial(4):
for ax0_0, ax1_0 in T.grid(2, 2):
with T.block("A_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax2_0_1 * 2 + ax1_0) |
T.reads(A_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("A_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0, ax1_0 in T.grid(2, 1):
with T.block("B_reindex_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(8, ax2_0_1 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused)
T.reads(B_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("B_reindex_shared_wmma.matrix_b"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 2, 2, 1):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0_3 * 2 + ax0_0_4)
v1_o = T.axis.spatial(8, ax1_0_4 + ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused + ax1_0_3)
v2_o = T.axis.reduce(8, ax2_0_0 * 8 + ax2_0_1 * 2 + ax2_0_2)
T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0) |
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0_0, ax1_0 in T.grid(2, 1):
with T.block("C_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused)
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_ax1_fused in T.serial(1024):
with T.block("C_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused // 2 * 32 + ax0_ax1_fused // 32)
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused % 2 * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused % 32)
T.reads(C_reindex_shared[v0, v1])
T.writes(compute[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch":4})
compute[v0, v1] = T.max(C_reindex_shared[v0, v1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [4, 1, 1, 1, 2]),
("SamplePerfectTile", [2, 2, 2, 1, 1]),
("SamplePerfectTile", [1, 4, 2]),
("SampleCategorical", 3),
("SampleCategorical", 3),
("SampleCategorical", 0),
]
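# decision_0 mirrors the sampled schedule above: the first two
# SamplePerfectTile entries are the five-level splits of the two spatial axes
# (each multiplying to 8 outer 16x16 wmma tiles), the third is the three-level
# split of the reduction axis, and the SampleCategorical entries record the
# meta_schedule.cooperative_fetch factors annotated on the shared-memory
# copy blocks.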
mod = te.create_prim_func(
te_workload.matmul_relu(
n=128,
m=128,
k=128,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[
multi_level_tiling_tensor_core(),
]
+ get_rules(kind="cuda", types=ms.schedule_rule.AutoInline),
)
check_sketches(
mod,
sketches=actual,
expected_mods=[matmul_relu_0],
expected_decisions=[decision_0],
)
def test_matmul_relu_with_fallback():
@T.prim_func
def matmul_relu_fallback_0(A: T.Buffer[(128, 128), "float16"], B: T.Buffer[(128, 128), "float16"], compute: T.Buffer[(128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
A_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
B_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
A_reindex_shared_wmma_matrix_a = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_a")
B_reindex_shared_wmma_matrix_b = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_b")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(2, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(2, thread="threadIdx.y"):
for ax2_0_0 in T.serial(2):
for ax0_ax1_fused in T.serial(2048):
with T.block("A_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused // 64)
v1 = T.axis.spatial(128, ax2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(A[v0, v1])
T.writes(A_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":4})
A_reindex_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused in T.serial(8192):
with T.block("B_reindex_shared"):
v0 = T.axis.spatial(128, ax2_0_0 * 64 + ax0_ax1_fused // 128)
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(B[v0, v1])
T.writes(B_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":2})
B_reindex_shared[v0, v1] = B[v0, v1]
for ax2_0_1 in T.serial(1):
for ax0_0, ax1_0 in T.grid(2, 4):
with T.block("A_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax2_0_0 * 4 + ax1_0)
T.reads(A_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("A_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0, ax1_0 in T.grid(4, 4):
with T.block("B_reindex_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(8, ax2_0_0 * 4 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused * 4 + ax1_0)
T.reads(B_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("B_reindex_shared_wmma.matrix_b"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 4, 2, 4):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_3 * 2 + ax0_0_4)
v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused * 4 + ax1_0_3 * 4 + ax1_0_4)
v2_o = T.axis.reduce(8, ax2_0_0 * 4 + ax2_0_1 * 4 + ax2_0_2)
T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16): |
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0_0, ax1_0 in T.grid(2, 4):
with T.block("C_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused * 4 + ax1_0)
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]) |
T.writes(C_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_ax1_fused in T.serial(4096):
with T.block("C_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused // 128)
v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
T.reads(C_reindex_shared[v0, v1])
T.writes(compute[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch":4})
compute[v0, v1] = T.max(C_reindex_shared[v0, v1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [2, 2, 1, 1, 2]),
("SamplePerfectTile", [1, 1, 2, 1, 4]),
("SamplePerfectTile", [2, 1, 4]),
("SampleCategorical", 3),
("SampleCategorical", 2),
("SampleCategorical", 1),
]
mod = te.create_prim_func(
te_workload.matmul_relu(
n=128,
m=128,
k=128,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[
multi_level_tiling_tensor_core(),
]
+ get_rules(
"cuda",
(
ms.schedule_rule.MultiLevelTiling,
ms.schedule_rule.AutoInline,
),
),
)
check_sketches(
mod,
sketches=actual,
expected_mods=[matmul_relu_fallback_0],
expected_decisions=[decision_0],
)
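# This variant also registers the generic MultiLevelTiling rule alongside the
# tensor-core one; the expected sketch still tensorizes with wmma, so the test
# presumably checks that the tensor-core rule takes precedence for this
# workload, with the generic rule serving only as a fallback.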
def test_conv2d():
@T.prim_func
def conv2d_0(inputs: T.Buffer[(1, 16, 16, 32), "float16"], weight: T.Buffer[(3, 3, 32, 32), "float16"], conv2d_nhwc: T.Buffer[(1, 16, 16, 32), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
PadInput = T.alloc_buffer([1, 18, 18, 32], dtype="float16")
conv2d_nhwc_reindex_shared = T.alloc_buffer([256, 32], dtype="float32", scope="shared")
conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([256, 32], dtype="float32", scope="wmma.accumulator")
PadInput_reindex_shared = T.alloc_buffer([256, 288], dtype="float16", scope="shared")
weight_reindex_shared = T.alloc_buffer([288, 32], dtype="float16", scope="shared")
PadInput_reindex_shared_wmma_matrix_a = T.alloc_buffer([256, 288], dtype="float16", scope="wmma.matrix_a")
weight_reindex_shared_wmma_matrix_b = T.alloc_buffer([288, 32], dtype="float16", scope="wmma.matrix_b")
for i0, i1, i2, i3 in T.grid(1, 18, 18, 32):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 17 and 1 <= i2_1 and i2_1 < 17, inputs[i0_1, i1_1 - 1, i2_1 - 1, i3_1], T.float16(0), dtype="float16")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(16, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(1, thread="threadIdx.y"):
for ax2_0_0 in T.serial(1):
for ax0_ax1_fused in T.serial(4608):
with T.block("PadInput_reindex_shared"):
v0 = T.axis.spatial(256, ax0_0_1_ax1_0_1_fused * 16 + ax0_ax1_fused // 288)
v1 = T.axis.spatial(288, ax0_ax1_fused % 288)
T.reads(PadInput[v0 // 256, v0 % 256 // 16 + v1 // 96, v0 % 16 + v1 % 96 // 32, v1 % 32])
T.writes(PadInput_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":2})
PadInput_reindex_shared[v0, v1] = PadInput[v0 // 256, v0 % 256 // 16 + v1 // 96, v0 % 16 + v1 % 96 // 32, v1 % 32]
for ax0_ax1_fused in T.serial(4608):
with T.block("weight_reindex_shared"):
v0 = T.axis.spatial(288, ax0_ax1_fused // 16)
v1 = T.axis.spatial(32, ax0_0_0_ax1_0_0_fused * 16 + ax0_ax1_fused % 16)
T.reads(weight[v0 // 96, v0 % 96 // 32, v0 % 32, v1])
T.writes(weight_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "meta_schedule.cooperative_fetch":8})
weight_reindex_shared[v0, v1] = weight[v0 // 96, v0 % 96 // 32, v0 % 32, v1]
for ax2_0_1 in T.serial(18):
for ax0_0, ax1_0 in T.grid(1, 1):
with T.block("PadInput_reindex_shared_wmma.matrix_a_o"):
v0_o, v1_o = T.axis.remap("SS", [ax0_0_1_ax1_0_1_fused, ax2_0_1])
T.reads(PadInput_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"}) |
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("PadInput_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(PadInput_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = PadInput_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0, ax1_0 in T.grid(1, 1):
with T.block("weight_reindex_shared_wmma.matrix_b_o"):
v0_o, v1_o = T.axis.remap("SS", [ax2_0_1, ax0_0_0_ax1_0_0_fused])
T.reads(weight_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(weight_reindex_shared_wmma_matrix_b[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("weight_reindex_shared_wmma.matrix_b"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(weight_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(weight_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
weight_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = weight_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 1, 1, 1): |
with T.block("conv2d_nhwc_o"):
v0_o = T.axis.spatial(16, ax0_0_4 + ax0_0_1_ax1_0_1_fused + ax0_0_3)
v1_o = T.axis.spatial(2, ax0_0_0_ax1_0_0_fused + ax1_0_3 + ax1_0_4)
v2_o = T.axis.reduce(18, ax2_0_0 * 18 + ax2_0_1 + ax2_0_2)
T.reads(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], weight_reindex_shared_wmma_matrix_b[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("conv2d_nhwc"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], weight_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(weight_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0_0, ax1_0 in T.grid(1, 1):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
v0_o, v1_o = T.axis.remap("SS", [ax0_0_1_ax1_0_1_fused, ax0_0_0_ax1_0_0_fused])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_ax1_fused in T.serial(256):
with T.block("conv2d_nhwc_reindex_shared"):
v0 = T.axis.spatial(256, ax0_0_1_ax1_0_1_fused * 16 + ax0_ax1_fused // 16)
v1 = T.axis.spatial(32, ax0_0_0_ax1_0_0_fused * 16 + ax0_ax1_fused % 16)
T.reads(conv2d_nhwc_reindex_shared[v0, v1])
T.writes(conv2d_nhwc[v0 // 256, v0 % 256 // 16, v0 % 16, v1])
T.block_attr({"meta_schedule.cooperative_fetch":3})
conv2d_nhwc[v0 // 256, v0 % 256 // 16, v0 % 16, v1] = conv2d_nhwc_reindex_shared[v0, v1]
decision_0 = [
("SamplePerfectTile", [1, 16, 1, 1, 1]),
("SamplePerfectTile", [2, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 18, 1]),
("SampleCategorical", 2),
("SampleCategorical", 1),
("SampleCategorical", 3),
]
mod = te.create_prim_func(
te_workload.conv2d_nhwc(
N=1,
H=16,
W=16,
CI=32,
CO=32,
kernel_size=3,
stride=1,
padding=1,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[
multi_level_tiling_tensor_core(),
],
)
check_sketches(
mod,
sketches=actual,
expected_mods=[conv2d_0],
expected_decisions=[decision_0],
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[
multi_level_tiling_tensor_core(
in_dtype="float16",
out_dtype=["float16", "float32"],
),
],
)
check_sketches(
mod,
sketches=actual,
expected_mods=[conv2d_0],
expected_decisions=[decision_0],
)
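# Running the space generation a second time with out_dtype=["float16",
# "float32"] registers one wmma intrin group per accumulator dtype; the
# expected sketch and decisions are identical to the first run, since only the
# float32 group can match this float32-output conv2d.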
def test_matmul_relu_pipeline():
@T.prim_func
def matmul_relu_pipeline_0(A: T.Buffer[(128, 128), "float16"], B: T.Buffer[(128, 128) |
, "float16"], compute: T.Buffer[(128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([128, 128], dtype="float32")
C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
A_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
B_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
A_reindex_shared_wmma_matrix_a = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_a")
B_reindex_shared_wmma_matrix_b = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_b")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(1, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(16, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(1, thread="threadIdx.y"):
for ax2_0_0 in T.serial(4, annotations={"software_pipeline_order":[0, 3, 1, 4, 5, 2, 6], "software_pipeline_stage":[0, 0, 0, 0, 0, 1, 1]}):
for ax0_ax1_fused in T.serial(1024):
with T.block("A_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_1_ax1_0_1_fused // 4 * 32 + ax0_ax1_fused // 32)
v1 = T.axis.spatial(128, ax2_0_0 * 32 + ax0_ax1_fused % 32)
T.reads(A[v0, v1])
T.writes(A_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "double_buffer_scope":0, "meta_schedule.cooperative_fetch":4, "tir.manifest_shared_memory_local_stage":1})
A_reindex_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused in T.serial(1024):
with T.block("B_reindex_shared"):
v0 = T.axis.spatial(128, ax2_0_0 * 32 + ax0_ax1_fused // 32)
v1 = T.axis.spatial(128, ax0_0_1_ax1_0_1_fused % 4 * 32 + ax0_ax1_fused % 32)
T.reads(B[v0, v1])
T.writes(B_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 8]], "double_buffer_scope":0, "meta_schedule.cooperative_fetch":2, "tir.manifest_shared_memory_local_stage":1})
B_reindex_shared[v0, v1] = B[v0, v1]
for ax2_0_1 in T.serial(2, annotations={"software_pipeline_order":[0, 1, 2], "software_pipeline_stage":[0, 0, 1]}):
for ax0_0, ax1_0 in T.grid(2, 1):
with T.block("A_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused // 4 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax2_0_0 * 2 + ax2_0_1)
T.reads(A_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_a"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("A_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0, ax1_0 in T.gri |
d(1, 2):
with T.block("B_reindex_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(8, ax2_0_0 * 2 + ax2_0_1)
v1_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused % 4 * 2 + ax1_0)
T.reads(B_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_f16_b"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("B_reindex_shared_wmma.matrix_b"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 1, 2, 2):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused // 4 * 2 + ax0_0_3 * 2 + ax0_0_4)
v1_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused % 4 * 2 + ax1_0_3 * 2 + ax1_0_4)
v2_o = T.axis.reduce(8, ax2_0_0 * 2 + ax2_0_1 + ax2_0_2)
T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0_0, ax1_0 in T.grid(2, 2): |
with T.block("C_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused // 4 * 2 + ax0_0)
v1_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused % 4 * 2 + ax1_0)
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_f32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
C_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0_ax1_fused in T.grid(1024):
with T.block("C_reindex_shared"):
v0 = T.axis.spatial(128, ax0_0_1_ax1_0_1_fused // 4 * 32 + ax0_ax1_fused // 32)
v1 = T.axis.spatial(128, ax0_0_1_ax1_0_1_fused % 4 * 32 + ax0_ax1_fused % 32)
T.reads(C_reindex_shared[v0, v1])
T.writes(C[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch":3})
C[v0, v1] = C_reindex_shared[v0, v1]
for i0, i1 in T.grid(128, 128):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(C[i0_1, i1_1])
T.writes(compute[i0_1, i1_1])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
decision_0 = [
("SamplePerfectTile", [1, 4, 1, |
1, 2]),
("SamplePerfectTile", [1, 4, 1, 1, 2]),
("SamplePerfectTile", [4, 2, 1]),
("SampleCategorical", 2),
("SampleCategorical", 2),
("SampleCategorical", 1),
]
mod = te.create_prim_func(
te_workload.matmul_relu(
n=128,
m=128,
k=128,
in_dtype="float16",
out_dtype="float32",
)
)
actual = generate_design_space(
kind="cuda",
mod=mod,
target=tvm.target.Target("cuda"),
types=None,
sch_rules=[
multi_level_tiling_tensor_core(
use_software_pipeline=True,
),
],
)
check_sketches(
mod,
sketches=actual,
expected_mods=[matmul_relu_pipeline_0],
expected_decisions=[decision_0],
)
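# The software_pipeline_stage / software_pipeline_order annotations on ax2_0_0
# and ax2_0_1 above, together with double_buffer_scope and
# tir.manifest_shared_memory_local_stage on the shared-memory copies, are what
# use_software_pipeline=True adds on top of the plain tensor-core sketch: when
# the pipeline is lowered, the shared loads for iteration k+1 overlap with the
# wmma compute of iteration k.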
def test_matmul_relu_global():
@T.prim_func
def matmul_relu_global_0(A: T.Buffer[(128, 128), "float16"], B: T.Buffer[(128, 128), "float16"], compute: T.Buffer[(128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([128, 128], dtype="float32")
C_reindex_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
A_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
B_reindex_shared = T.alloc_buffer([128, 128], dtype="float16", scope="shared")
A_reindex_shared_wmma_matrix_a = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_a")
B_reindex_shared_wmma_matrix_b = T.alloc_buffer([128, 128], dtype="float16", scope="wmma.matrix_b")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(1, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(1, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(16, thread="threadIdx.y"):
for ax2_0_0 in T.serial(2):
for ax0_ax1_fused in T.serial(8192):