=[8192, 2])
sch.annotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch", ann_val=v53)
sch.reverse_compute_at(block=b1, loop=l33, preserve_unit_loops=True)
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, WarpExecutionAfterRewrite)
if __name__ == "__main__":
tvm.testing.main()
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
return Target("cuda", host="llvm")
def _create_context(mod, target) -> ms.TuneContext:
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[
ms.postproc.RewriteLayout(),
],
mutator_probs={},
),
task_name="test",
)
return ctx
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
def transform(self):
def inner(mod):
target = Target("cuda", host="llvm")
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[
ms.postproc.RewriteLayout(),
],
mutator_probs={},
),
task_name="test",
)
sch = tvm.tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
return sch.mod
return inner
class TestTIRMatmul(BaseBeforeAfter):
"""Main functionality test
A new block should be inserted to transform the layout, with the
compute block operating on the temporary transformed buffer.
"""
def before(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [1]})
for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
with T.block("matmul"):
vi = T.axis.S(16, i0 * 4 + i1)
vj = T.axis.S(16, j)
vk = T.axis.R(16, k0 * 4 + k1)
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def expected(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [1]})
B_reindex = T.alloc_buffer([16, 4, 4], dtype="float32")
for ax0, ax1 in T.grid(16, 16):
with T.block("layout_rewrite"):
i0, i1 = T.axis.remap("SS", [ax0, ax1])
T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
B_reindex[i1, i0 // 4, i0 % 4] = B[i0, i1]
for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
with T.block("matmul"):
vi = T.axis.spatial(16, i0 * 4 + i1)
vj = T.axis.spatial(16, j)
vk = T.axis.reduce(16, k0 * 4 + k1)
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B_reindex[vj, vk // 4, vk % 4]
class TestRewrittenBuffersMustOccurWithinBlock(BaseBeforeAfter):
"""Buffers must occur within a Block"""
def before(
A: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [0]})
for i, j in T.grid(16, 16):
T.evaluate(A[i, j])
expected = tvm.TVMError
class TestExtentOne(BaseBeforeAfter):
"""Buffers with dimensions of extent 1 can be transformed
Regression test for a previous bug, in which the removal of
trivial variables resulted in an error in `IndexMap::Inverse`.
"""
def before(
A: T.Buffer[(16, 1), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [0]})
for i, j in T.grid(16, 1):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
T.evaluate(A[vi, vj])
def expected(A: T.Buffer[(16, 1), "float32"]):
T.func_attr({"layout_free_buffers": [0]})
A_global = T.alloc_buffer([16], dtype="float32")
for ax0, ax1 in T.grid(16, 1):
with T.block("A_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
A_global[v0] = A[v0, v1]
for i, j in T.grid(16, 1):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
T.evaluate(A_global[vi])
@T.prim_func
def tir_matmul(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [1]})
for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
with T.block("matmul"):
vi = T.axis.S(16, i0 * 4 + i1)
vj = T.axis.S(16, j)
vk = T.axis.R(16, k0 * 4 + k1)
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@T.prim_func
def rewritten_tir_matmul(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [1]})
B_reindex = T.alloc_buffer([16, 4, 4], dtype="float32")
for ax0, ax1 in T.grid(16, 16):
with T.block("layout_rewrite"):
i0, i1 = T.axis.remap("SS", [ax0, ax1])
T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
B_reindex[i1, i0 // 4, i0 % 4] = B[i0, i1]
for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
with T.block("matmul"):
vi = T.axis.spatial(16, i0 * 4 + i1)
vj = T.axis.spatial(16, j)
vk = T.axis.reduce(16, k0 * 4 + k1)
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B_reindex[vj, vk // 4, vk % 4]
def test_layout_rewrite():
target = _target()
ctx = _create_context(tir_matmul, target)
sch = tvm.tir.Schedule(tir_matmul, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod["main"], rewritten_tir_matmul)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm.meta_schedule.postproc import RewriteParallelVectorizeUnroll
from tvm.script import tir as T
from tvm.tir.schedule import Schedule
@tvm.script.ir_module
class Move_PUV:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, [1024, 1024, 1024], dtype="float32")
B = T.match_buffer(b, [1024, 1024, 1024], dtype="float32")
with T.block("root"):
T.block_attr({"meta_schedule.parallel":128, "meta_schedule.vectorize":32})
for i0, j0, i1, j1, k0, i2, j2, k1 in T.grid(128, 64, 4, 4, 64, 4, 8, 32):
with T.block("move"):
vi = T.axis.spatial(1024, i0 * 16 + i1 * 4 + i2)
vj = T.axis.spatial(1024, j0 * 32 + j1 * 8 + j2)
vk = T.axis.spatial(1024, k0 * 32 + k1)
T.where((i0 * 4 + i1) * 4 + i2 < 1024 and (j0 * 4 + j1) * 8 + j2 < 1024 and k0 * 32 + k1 < 1024)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk]
@T.prim_func
def Move_PUV0(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, [1024, 1024, 1024], dtype="float32")
B = T.match_buffer(b, [1024, 1024, 1024], dtype="float32")
with T.block("root"):
for i0_j0_fused in T.parallel(0, 8192):
for i1, j1, k0, i2, j2 in T.grid(4, 4, 64, 4, 8):
for k1_fused in T.vectorized(0, 32):
with T.block("move"):
vi = T.axis.spatial(1024, i0_j0_fused // 64 * 16 + i1 * 4 + i2)
vj = T.axis.spatial(1024, i0_j0_fused % 64 * 32 + j1 * 8 + j2)
vk = T.axis.spatial(1024, k0 * 32 + k1_fused)
T.where(
i0_j0_fused // 64 * 16 + i1 * 4 + i2 < 1024
and i0_j0_fused % 64 * 32 + j1 * 8 + j2 < 1024
and k0 * 32 + k1_fused < 1024
)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk]
@tvm.script.ir_module
class Fused_NN_Dense:
@T.prim_func
def main(placeholder: T.Buffer[(64, 768), "float32"], placeholder_1: T.Buffer[(768, 768), "float32"], T_matmul_NT: T.Buffer[(64, 768), "float32"]) -> None:
for i0, i1, i2 in T.grid(64, 768, 768):
with T.block("T_matmul_NT"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(placeholder[i, k], placeholder_1[j, k])
T.writes(T_matmul_NT[i, j])
with T.init():
T_matmul_NT[i, j] = T.float32(0)
T_matmul_NT[i, j] = T_matmul_NT[i, j] + placeholder[i, k] * placeholder_1[j, k]
@T.prim_func
def before_matmul_vectorize(
placeholder: T.Buffer[(64, 768), "float32"],
placeholder_1: T.Buffer[(768, 768), "float32"],
T_matmul_NT: T.Buffer[(64, 768), "float32"],
) -> None:
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.vectorize":64})
T_matmul_NT_global = T.alloc_buffer([64, 768], dtype="float32")
for i0_0, i1_0, i0_1, i1_1 in T.grid(1, 16, 1, 3):
for i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(48, 8, 1, 16, 8, 16):
with T.block("T_matmul_NT"):
i = T.axis.spatial(64, i0_2 * 8 + i0_3)
j = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + i1_3)
k = T.axis.reduce(768, i2_0 * 16 + i2_1)
T.reads(placeholder[i, k], placeholder_1[j, k])
T.writes(T_matmul_NT_global[i, j])
with T.init():
T_matmul_NT_global[i, j] = T.float32(0)
T_matmul_NT_global[i, j] = T_matmul_NT_global[i, j] + placeholder[i, k] * placeholder_1[j, k]
for ax0, ax1 in T.grid(64, 16):
with T.block("T_matmul_NT_global"):
v0 = T.axis.spatial(64, ax0)
v1 = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + ax1)
T.reads(T_matmul_NT_global[v0, v1])
T.writes(T_matmul_NT[v0, v1])
T_matmul_NT[v0, v1] = T_matmul_NT_global[v0, v1]
@T.prim_func
def after_matmul_vectorize(
placeholder: T.Buffer[(64, 768), "float32"],
placeholder_1: T.Buffer[(768, 768), "float32"],
T_matmul_NT: T.Buffer[(64, 768), "float32"],
) -> None:
T_matmul_NT_global = T.alloc_buffer([64, 768], dtype="float32")
for i0_0, i1_0, i0_1, i1_1 in T.grid(1, 16, 1, 3):
for i2_0, i0_2, i1_2, i2_1, i0_3 in T.grid(48, 8, 1, 16, 8):
for i1_3_fused in T.vectorized(16):
with T.block("T_matmul_NT"):
i = T.axis.spatial(64, i0_2 * 8 + i0_3)
j = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + i1_3_fused)
k = T.axis.reduce(768, i2_0 * 16 + i2_1)
T.reads(placeholder[i, k], placeholder_1[j, k])
T.writes(T_matmul_NT_global[i, j])
with T.init():
T_matmul_NT_global[i, j] = T.float32(0)
T_matmul_NT_global[i, j] = T_matmul_NT_global[i, j] + placeholder[i, k] * placeholder_1[j, k]
for ax0 in T.serial(64):
for ax1_fused in T.vectorized(16):
with T.block("T_matmul_NT_global"):
v0 = T.axis.spatial(64, ax0)
v1 = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + ax1_fused)
T.reads(T_matmul_NT_global[v0, v1])
T.writes(T_matmul_NT[v0, v1])
T_matmul_NT[v0, v1] = T_matmul_NT_global[v0, v1]
def test_meta_schedule_postproc_rewrite_parallel_unroll_vectorize():
postproc = RewriteParallelVectorizeUnroll()
sch = Schedule(Move_PUV)
assert postproc.apply(sch)
mod = tvm.tir.transform.Simplify()(sch.mod)
tvm.ir.assert_structural_equal(mod["main"], Move_PUV0)
def test_vectorize_inner_loop():
sch = Schedule(before_matmul_vectorize)
rule = RewriteParallelVectorizeUnroll()
assert rule.apply(sch)
tvm.ir.assert_structural_equal(sch.mod["main"], after_matmul_vectorize)
if __name__ == "__main__":
test_meta_schedule_postproc_rewrite_parallel_unroll_vectorize()
test_vectorize_inner_loop()
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
return Target("cuda", host="llvm")
def _create_context(mod, target) -> ms.TuneContext:
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[
ms.postproc.RewriteReductionBlock(),
],
mutator_probs={},
),
task_name="test",
)
return ctx
@tvm.script.ir_module
class Matmul_before_rewrite:
@T.prim_func
def main(var_A: T.handle, var_B: T.handle, var_C: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
C = T.match_buffer(var_C, [512, 512], dtype="float32")
C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"):
for i2_0 in T.serial(0, 1):
for ax0_ax1_fused_0 in T.serial(0, 32768):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
with T.block("A_shared"):
v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) // 512)
v1 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) % 512)
T.reads([A[v0, v1]])
T.writes([A_shared[v0, v1]])
T.block_attr({"meta_schedule.cooperative_fetch":1})
A_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused_0 in T.serial(0, 1024):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
for ax0_ax1_fused_2 in T.vectorized(0, 2):
with T.block("B_shared"):
v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) // 32)
v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) % 32)
T.reads([B[v0, v1]])
T.writes([B_shared[v0, v1]])
T.block_attr({"meta_schedule.cooperative_fetch":2})
B_shared[v0, v1] = B[v0, v1]
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2):
with T.block("C"):
i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4)
j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4)
k = T.axis.reduce(512, i2_1 * 32 + i2_2)
T.reads([C_local[i, j], A_shared[i, k], B_shared[k, j]])
T.writes([C_local[i, j]])
with T.init():
C_local[i, j] = T.float32(0)
C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
for ax0, ax1 in T.grid(32, 4):
with T.block("C_local"):
v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0)
v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1)
T.reads([C_local[v0, v1]])
T.writes([C[v0, v1]])
C[v0, v1] = C_local[v0, v1]
@tvm.script.ir_module
class Matmul_after_rewrite:
@T.prim_func
def main(var_A: T.handle, var_B: T.handle, var_C: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
C = T.match_buffer(var_C, [512, 512], dtype="float32")
C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"):
for i2_0 in T.serial(0, 1):
for ax0_ax1_fused_0 in T.serial(0, 32768):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
with T.block("A_shared"):
v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) // 512)
v1 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) % 512)
T.reads([A[v0, v1]])
T.writes([A_shared[v0, v1]])
T.block_attr({"meta_schedule.cooperative_fetch":1})
A_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused_0 in T.serial(0, 1024):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
for ax0_ax1_fused_2 in T.vectorized(0, 2):
with T.block("B_shared"):
v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) // 32)
v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) % 32)
T.reads([B[v0, v1]])
T.writes([B_shared[v0, v1]])
T.block_attr({"meta_schedule.cooperative_fetch":2})
B_shared[v0, v1] = B[v0, v1]
for i0_3_init, i1_3_init, i0_4_init, i1_4_init in T.grid(2, 2, 16, 2):
with T.block("C_init"):
i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3_init * 16 + i0_4_init)
j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3_init * 2 + i1_4_init)
T.reads([])
T.writes([C_local[i, j]])
C_local[i, j] = T.float32(0)
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2):
with T.block("C_update"):
i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4)
j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4)
k = T.axis.reduce(512, i2_1 * 32 + i2_2)
T.reads([C_local[i, j], A_shared[i, k], B_shared[k, j]])
T.writes([C_local[i, j]])
C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
for ax0, ax1 in T.grid(32, 4):
with T.block("C_local"):
v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0)
v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1)
T.reads([C_local[v0, v1]])
T.writes([C[v0, v1]])
C[v0, v1] = C_local[v0, v1]
@tvm.script.ir_module
class Softmax_cross_thread_reduction:
@T.prim_func
def main(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 8):
for ax1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, ax1_0 * 32 + ax1_1)
T.reads(T_softmax_maxelem_shared[i0_1], A[i0_1, k])
T.writes(T_softmax_maxelem_shared[i0_1])
with T.init():
T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_shared[i0_1] = T.max(T_softmax_maxelem_shared[i0_1], A[i0_1, k])
for ax0, ax1_0 in T.grid(1, 8):
for ax1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0_2 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, ax1_0 * 32 + ax1_1)
T.reads(T_softmax_expsum_shared[i0_2], A[i0_2, k], T_softmax_maxelem_shared[i0_2])
T.writes(T_softmax_expsum_shared[i0_2])
with T.init():
T_softmax_expsum_shared[i0_2] = T.float32(0)
T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32")
for i1_0 in T.serial(8):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("T_softmax_norm"): |
i0_3 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
T.reads(A[i0_3, i1], T_softmax_maxelem_shared[i0_3], T_softmax_expsum_shared[i0_3])
T.writes(T_softmax_norm[i0_3, i1])
T.block_attr({"axis":1})
T_softmax_norm[i0_3, i1] = T.exp(A[i0_3, i1] - T_softmax_maxelem_shared[i0_3], dtype="float32") / T_softmax_expsum_shared[i0_3]
def test_rewrite_tiled_matmul():
mod = Matmul_before_rewrite
target = _target()
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, Matmul_after_rewrite)
def test_rewrite_softmax():
mod = Softmax_cross_thread_reduction
target = _target()
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, Softmax_cross_thread_reduction)
if __name__ == "__main__":
test_rewrite_tiled_matmul()
test_rewrite_softmax()
import tvm
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.tir.tensor_intrin import arm_cpu, cuda, rocm, x86
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTiled:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for (
i0_0,
i1_0,
i2_0,
i3_0,
i4_0_0,
i0_1,
i1_1,
i2_1,
i3_1,
i4_0_1,
i5_0,
i6_0,
i7_0,
i8_0,
i9_0_0,
i0_2,
i1_2,
i2_2,
i3_2,
i4_0_2,
i5_1,
i6_1,
i7_1,
i8_1,
i9_0_1,
i0_3,
i1_3,
i2_3,
i3_3,
i4_0_3,
) in T.grid(
1,
1,
2,
1,
1,
1,
4,
1,
14,
1,
1,
1,
4,
1,
1,
1,
4,
7,
1,
1,
1,
1,
1,
4,
1,
1,
1,
4,
4,
1,
):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_1 * 4 + i1_2)
oh = T.axis.spatial(56, i2_0 * 28 + i2_2 * 4 + i2_3)
ow = T.axis.spatial(56, i3_1 * 4 + i3_3)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer, ic_f_inner = T.axis.remap("RR", [i7_0, i8_1])
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
T.block_attr({"meta_schedule.auto_tensorize": "dot_16x4_vnni"})
with T.init():
for i4_1 in T.serial(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init])
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block, ic_s_inner = T.axis.remap("SR", [i4_1, i9_1])
T.reads(
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block],
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[
oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner
],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
"int32",
) * T.cast(
placeholder_1[
oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner
],
"int32",
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTensorized:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0_0, i1_0, i2_0, i3_0, i4_0_0, i0_1, i1_1, i2_1, i3_1, i4_0_1, i5_0, i6_0 in T.grid(
1, 1, 2, 1, 1, 1, 4, 1, 14, 1, 1, 1
):
for i1_2_init, i2_2_init, i2_3_init, i3_3_init in T.grid(4, 7, 4, 4):
with T.block("conv2d_NCHWc_int8_o_init"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_1 * 4 + i1_2_init)
oh = T.axis.spatial(56, i2_0 * 28 + i2_2_init * 4 + i2_3_init)
ow = T.axis.spatial(56, i3_1 * 4 + i3_3_init)
oc_block_o = T.axis.spatial(1, 0)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
for i4_1 in T.vectorized(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init])
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init] = 0
for (
i7_0,
i8_0,
i9_0_0,
i0_2,
i1_2,
i2_2,
i3_2,
i4_0_2,
i5_1,
i6_1,
i7_1,
i8_1,
i9_0_1,
i0_3,
i1_3,
i2_3,
i3_3,
i4_0_3,
) in T.grid(4, 1, 1, 1, 4, 7, 1, 1, 1, 1, 1, 4, 1, 1, 1, 4, 4, 1):
with T.block("conv2d_NCHWc_int8_o_update"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_1 * 4 + i1_2)
oh = T.axis.spatial(56, i2_0 * 28 + i2_2 * 4 + i2_3)
ow = T.axis.spatial(56, i3_1 * 4 + i3_3)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer, ic_f_inner = T.axis.remap("RR", [i7_0, i8_1])
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16],
placeholder[
n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4
],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
A = T.match_buffer(
placeholder[
n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4
],
[4],
dtype="uint8",
offset_factor=1,
)
B = T.match_buffer(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4],
[16, 4],
dtype="int8",
offset_factor=1,
)
C = T.match_buffer(
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16],
[16],
dtype="int32",
offset_factor=1,
)
A_u8x4 = A.vload([0], "uint8x4")
A_i32 = T.reinterpret(A_u8x4, dtype="int32")
B_i8x64 = B.vload([0, 0], dtype="int8x6 |
4")
B_i32x16 = T.reinterpret(B_i8x64, dtype="int32x16")
C_i32x16 = C.vload([0], dtype="int32x16")
C[T.ramp(0, 1, 16)] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.x86.avx512.vpdpbusd.512"),
T.uint32(0),
C_i32x16,
T.broadcast(A_i32, 16),
B_i32x16,
dtype="int32x16",
)
@tvm.script.ir_module
class DenseDP4ATiled:
@T.prim_func
def main(
X: T.Buffer[(128, 128), "int8"],
W: T.Buffer[(128, 128), "int8"],
compute: T.Buffer[(128, 128), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
compute_local = T.alloc_buffer([128, 128], dtype="int32", scope="local")
X_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
W_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(2, thread="threadIdx.x"):
for i2_0_0 in T.serial(2):
for ax0_ax1_fused in T.serial(1024):
with T.block("X_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused // 2 * 16 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 4})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused in T.serial(4096):
with T.block("W_shared"): |
v0 = T.axis.spatial(
128, i0_0_i1_0_fused % 2 * 64 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(W[v0, v1])
T.writes(W_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
W_shared[v0, v1] = W[v0, v1]
for i2_0_1, i0_3, i1_3, i2_0_2, i0_4, i1_4 in T.grid(2, 4, 16, 8, 4, 1):
with T.block("compute_o"):
i = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + i0_3 * 4 + i0_4)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ i1_3,
)
k_o = T.axis.reduce(32, i2_0_0 * 16 + i2_0_1 * 8 + i2_0_2)
T.reads(
X_shared[i, k_o * 4 : k_o * 4 + 4],
W_shared[j, k_o * 4 : k_o * 4 + 4],
)
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.auto_tensorize": "dp4a"})
with T.init():
with T.block("compute_init"):
T.reads()
T.writes(compute_local[i, j])
compute_local[i, j] = 0
for i2_1 in T.serial(4):
with T.block("compute"):
k = T.axis.reduce(4, i2_1)
T.reads(
compute_local[i, j],
X_shared[i, k_o * 4 + k],
W_shared[j, k_o * 4 + k],
)
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"})
compute_local[i, j] = compute_local[i, j] + T.cast(
X_shared[i, k_o * 4 + k], "int32"
) * T.cast(W_shared[j, k_o * 4 + k], "int32")
for ax0, ax1 in T.grid(16, 16):
with T.block("compute_local"):
v0 = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ ax1,
)
T.reads(compute_local[v0, v1])
T.writes(compute[v0, v1])
compute[v0, v1] = compute_local[v0, v1]
@tvm.script.ir_module
class DenseDP4ATensorized:
@T.prim_func
def main(
X: T.Buffer[(128, 128), "int8"],
W: T.Buffer[(128, 128), "int8"],
compute: T.Buffer[(128, 128), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
compute_local = T.alloc_buffer([128, 128], dtype="int32", scope="local")
X_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
W_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(2, thread="threadIdx.x"):
for i0_3_init, i1_3_init, i0_4_init in T.grid(4, 16, 4):
with T.block("compute_o_init"):
i = T.axis.spatial(
128, i0_0_i1_0_fused // 2 * 16 + i0_3_init * 4 + i0_4_init
)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ i1_3_init,
)
T.reads()
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.auto_tensorize": ""})
with T.block("compute_init"):
T.reads()
T.writes(compute_local[i, j])
compute_local[i, j] = 0
for i2_0_0 in T.serial(2):
for ax0_ax1_fused in T.serial(1024):
with T.block("X_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused // 2 * 16 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 4})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused in T.serial(4096):
with T.block("W_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused % 2 * 64 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(W[v0, v1])
T.writes(W_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
W_shared[v0, v1] = W[v0, v1]
for i2_0_1, i0_3, i1_3, i2_0_2, i0_4, i1_4 in T.grid(2, 4, 16, 8, 4, 1):
with T.block("compute_o_update"):
i = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + i0_3 * 4 + i0_4)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ i1_3,
)
k_o = T.axis.reduce(32, i2_0_0 * 16 + i2_0_1 * 8 + i2_0_2)
T.reads(
compute_local[i, j],
X_shared[i, k_o * 4 : k_o * 4 + 4],
W_shared[j, k_o * 4 : k_o * 4 + 4],
)
T.writes(compute_local[i, j])
A = T.match_buffer(
X_shared[i, k_o * 4 : k_o * 4 + 4],
[4],
dtype="int8",
scope="shared",
align=4,
offset_factor=1,
)
B = T.match_buffer(
W_shared[j, k_o * 4 : k_o * 4 + 4],
[4],
dtype="int8",
scope="shared",
align=4,
offset_factor=1,
)
C = T.match_buffer(
compute_local[i, j],
[1],
dtype="int32",
scope="local",
align=4,
offset_factor=1,
)
C[0] = C[0] + T.call_pure_extern(
"__dp4a",
A[T.ramp(0, 1, 4)],
B[T.ramp(0, 1, 4)],
0,
dtype="int32",
)
for ax0, ax1 in T.grid(16, 16):
with T.block("compute_local"):
v0 = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ ax1,
)
T.reads(compute_local[v0, v1])
T.writes(compute[v0, v1])
compute[v0, v1] = compute_local[v0, v1]
def _create_context(mod, target, postprocs) -> ms.TuneContext:
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=postprocs,
mutator_probs={},
),
task_name="test",
)
return ctx
def test_rewrite_tensorize_conv2d_nchwc_vnni():
mod = Conv2dNCHWcVNNIModuleTiled
target = tvm.target.Target("llvm -mcpu=cascadelake -num-cores 4")
ctx = _create_context(
mod,
target,
[
ms.postproc.RewriteReductionBlock(),
ms.postproc.RewriteTensorize(True),
],
)
sch = tvm.tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
for proc in ctx.space_generator.postprocs:
proc.apply(sch)
tvm.ir.assert_structural_equal(sch.mod, Conv2dNCHWcVNNIModuleTensorized)
def test_rewrite_tensorize_dense_dp4a():
mod = DenseDP4ATiled
target = tvm.target.Target("nvidia/geforce-rtx-3070")
ctx = _create_context(
mod,
target,
[
ms.postproc.RewriteCooperativeFetch(),
ms.postproc.RewriteReductionBlock(),
ms.postproc.RewriteTensorize(),
],
)
sch = tvm.tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
for proc in ctx.space_generator.postprocs:
proc.apply(sch)
tvm.ir.assert_structural_equal(sch.mod, DenseDP4ATensorized)
if __name__ == "__main__":
test_rewrite_tensorize_conv2d_nchwc_vnni()
test_rewrite_tensorize_dense_dp4a()
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
return Target("cuda --max_threads_per_block=1024", host="llvm")
def _create_context(mod, target) -> ms.TuneContext:
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[ms.postproc.RewriteUnboundBlock()],
mutator_probs={},
),
task_name="test",
)
return ctx
@tvm.script.ir_module
class Before_cooperative_fetch:
@T.prim_func
def main(var_A: T.handle, var_B: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
for i, j in T.grid(512, 512):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + 1.0
@tvm.script.ir_module
class After_cooperative_fetch:
@T.prim_func
def main(var_A: T.handle, var_B: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
for i_j_fused_0 in T.thread_binding(256, thread="blockIdx.x"):
for i_j_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
with T.block("C"):
vi = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) // 512)
vj = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) % 512)
B[vi, vj] = A[vi, vj] + 1.0
@tvm.script.ir_module
class Before_norm_bmn:
@T.prim_func
def main(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[(1,), "float32"]) -> None:
C = T.alloc_buffer([1], dtype="float32")
for i0, i1, i2 in T.grid(1, 256, 256):
with T.block("C"):
b, i, j = T.axis.remap("SRR", [i0, i1, i2])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + A[b, i, j] * A[b, i, j]
for i0 in T.serial(1):
with T.block("D"):
b = T.axis.S(1, i0)
D[b] = T.sqrt(C[b], dtype="float32")
@tvm.script.ir_module
class After_norm_bmn:
@T.prim_func
def main(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[(1,), "float32"]) -> None:
C = T.alloc_buffer([1], dtype="float32")
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
for i1, i2 in T.grid(256, 256):
with T.block("C"):
b = T.axis.S(1, 0)
i, j = T.axis.remap("RR", [i1, i2])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + A[b, i, j] * A[b, i, j]
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
with T.block("D"):
b = T.axis.S(1, 0)
D[b] = T.sqrt(C[b], dtype="float32")
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape:
@T.prim_func
def main(
placeholder: T.Buffer[(12, 64, 64), "float32"], T_reshape: T.Buffer[(64, 768), "float32"]
) -> None:
for i0_i1_fused_0, i0_i1_fused_1 in T.grid(1536, 32):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(64, (i0_i1_fused_0 * 32 + i0_i1_fused_1) // 768)
ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 32 + i0_i1_fused_1) % 768)
T.reads(placeholder[ax1 % 768
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64
(ax1 % 64
ax1 % 64 % 64,
]
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape_large:
@T.prim_func
def main(
placeholder: T.Buffer[(12, 64, 64), "float32"], T_reshape: T.Buffer[(64, 768), "float32"]
) -> None:
for i0_i1_fused_0, i0_i1_fused_1 in T.grid(1536000, 32):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(64, (i0_i1_fused_0 * 32 + i0_i1_fused_1) // 768)
ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 32 + i0_i1_fused_1) % 768)
T.reads(placeholder[ax1 % 768
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64
(ax1 % 64
ax1 % 64 % 64,
]
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape_after_rub:
@T.prim_func
def main(
placeholder: T.Buffer[(12, 64, 64), "float32"], T_reshape: T.Buffer[(64, 768), "float32"]
) -> None:
for i0_i1_fused_0_i0_i1_fused_1_fused_0 in T.thread_binding(48, thread="blockIdx.x"):
for i0_i1_fused_0_i0_i1_fused_1_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(
64,
(
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
// 32
* 32
+ (
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
% 32
)
// 768,
)
ax1 = T.axis.spatial(
768,
(
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
// 32
* 32
+ (
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
% 32
)
% 768,
)
T.reads(placeholder[ax1 % 768
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64
(ax1 % 64
ax1 % 64 % 64,
]
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape_after_rub_large:
@T.prim_func
def main(
placeholder: T.Buffer[(12, 64, 64), "float32"], T_reshape: T.Buffer[(64, 768), "float32"]
) -> None:
for i0_i1_fused_0_i0_i1_fused_1_fused_1 in T.thread_binding(256, thread="blockIdx.x"):
for i0_i1_fused_0_i0_i1_fused_1_fused_2 in T.thread_binding(1024, thread="threadIdx.x"):
for i0_i1_fused_0_i0_i1_fused_1_fused_0 in T.serial(188):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(
64,
(
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 262144
+ i0_i1_fused_0_i0_i1_fused_1_fused_1 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
)
// 32
* 32
+ (
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 262144
+ i0_i1_fused_0_i0_i1_fused_1_fused_1 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
)
% 32
)
// 768,
)
ax1 = T.axis.spatial(
768,
(
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 262144
+ i0_i1_fused_0_i0_i1_fused_1_fused_1 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
)
// 32
* 32
+ (
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 262144
+ i0_i1_fused_0_i0_i1_fused_1_fused_1 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
)
% 32
)
% 768,
)
T.where(
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 256
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
* 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
< 49152000
)
T.reads(placeholder[ax1 % 768
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64
% 12,
(ax1 % 64
ax1 % 64 % 64,
]
@T.prim_func
def before_unrolled_loop(
placeholder: T.Buffer[(1, 56, 56, 64), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
for i2_0, i3_0, i2_1, i3_1 in T.grid(98, 4, 2, 16):
for i0 in T.unroll(4):
for i1 in T.unroll(4):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("inverse"):
vh, vw = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(196, i2_0 * 2 + i2_1)
co = T.axis.spatial(64, i3_0 * 16 + i3_1)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(bgemm[r_a, r_b, p, co])
T.writes(inverse[vh, vw, p, co])
with T.init():
inverse[vh, vw, p, co] = T.float32(0)
inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co]
@T.prim_func
def after_unrolled_loop(
placeholder: T.Buffer[(1, 56, 56, 64), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
for i2_0_i3_0_i2_1_i3_1_fused_0 in T.thread_binding(13, thread="blockIdx.x"):
for i2_0_i3_0_i2_1_i3_1_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
for i0 in T.unroll(4):
for i1 in T.unroll(4):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("inverse"):
vh, vw = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(
196,
(
i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
+ i2_0_i3_0_i2_1_i3_1_fused_1
)
// 128
* 2
+ (
i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
+ i2_0_i3_0_i2_1_i3_1_fused_1
)
% 32
// 16,
)
co = T.axis.spatial(
64,
(
i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
+ i2_0_i3_0_i2_1_i3_1_fused_1
)
% 128
// 32
* 16
+ (
i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
+ i2_0_i3_0_i2_1_i3_1_fused_1
)
% 16,
)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.where(
i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1
< 12544
)
T.reads(bgemm[r_a, r_b, p, co])
T.writes(inverse[vh, vw, p, co])
with T.init():
inverse[vh, vw, p, co] = T.float32(0)
inverse[vh, vw, p, co] = (
inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co]
)
def test_rewrite_cooperative_fetch():
mod = Before_cooperative_fetch
target = _target()
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, After_cooperative_fetch)
def test_rewrite_norm_bmn():
mod = Before_norm_bmn
target = _target()
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, After_norm_bmn)
def test_rewrite_cuda_loop_split_no_reduction():
mod = Bert_fused_reshape_transpose_reshape
target = Target("nvidia/nvidia-v100", host="llvm")
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, Bert_fused_reshape_transpose_reshape_after_rub)
def test_rewrite_cuda_loop_split_no_reduction_large():
mod = Bert_fused_reshape_transpose_reshape_large
target = Target("nvidia/nvidia-v100", host="llvm")
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod, Bert_fused_reshape_transpose_reshape_after_rub_large)
def test_rewrite_cuda_loop_split_for_kind():
mod = before_unrolled_loop
target = Target("nvidia/nvidia-v100", host="llvm")
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
tvm.ir.assert_structural_equal(sch.mod["main"], after_unrolled_loop)
if __name__ == "__main__":
test_rewrite_cooperative_fetch()
test_rewrite_norm_bmn()
test_rewrite_cuda_loop_split_no_reduction()
test_rewrite_cuda_loop_split_no_reduction_large()
test_rewrite_cuda_loop_split_for_kind()
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
return Target("nvidia/geforce-rtx-3080")
def _create_context(mod, target) -> ms.TuneContext:
return ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[ms.postproc.VerifyGPUCode()],
mutator_probs={},
),
task_name="test",
)
@tvm.script.ir_module
class Conv2dCuda0:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "T.noalias": True})
threadIdx_x = T.env_thread("threadIdx.x")
threadIdx_y = T.env_thread("threadIdx.y")
blockIdx_x = T.env_thread("blockIdx.x")
blockIdx_y = T.env_thread("blockIdx.y")
blockIdx_z = T.env_thread("blockIdx.z")
A = T.match_buffer(a, [14*14*256*256], dtype="float32")
B = T.match_buffer(b, [14*14*512*256], dtype="float32")
T.launch_thread(blockIdx_z, 196)
B_local = T.decl_buffer([64], "float32", scope="local")
Apad_shared = T.decl_buffer([512], "float32", scope="shared")
Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
T.launch_thread(blockIdx_y, 8)
T.launch_thread(blockIdx_x, 4)
T.launch_thread(threadIdx_y, 8)
T.launch_thread(threadIdx_x, 8)
for ff_c_init, nn_c_init in T.grid(8, 8):
B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
for rc_outer, ry, rx in T.grid(32, 3, 3):
for ax3_inner_outer in T.serial(0, 2):
Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= blockIdx_z % 14 + rx and blockIdx_z % 14 + rx < 15,
A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
T.broadcast(T.float32(0), 4),
dtype="float32x4", |
)
for rc_inner in T.serial(0, 8):
for ax3 in T.serial(0, 8):
Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
for ff_c, nn_c in T.grid(8, 8):
B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]
@tvm.script.ir_module
class Conv2dCuda1:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "T.noalias": True})
threadIdx_x = T.env_thread("threadIdx.x")
threadIdx_y = T.env_thread("threadIdx.y")
blockIdx_x = T.env_thread("blockIdx.x")
blockIdx_y = T.env_thread("blockIdx.y")
blockIdx_z = T.env_thread("blockIdx.z")
A = T.match_buffer(a, [14*14*256*256], dtype="float32")
B = T.match_buffer(b, [14*14*512*256], dtype="float32")
T.launch_thread(blockIdx_z, 196)
B_local = T.decl_buffer([6400000], "float32", scope="local")
Apad_shared = T.decl_buffer([512], "float32", scope="shared")
Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
T.launch_thread(blockIdx_y, 8)
T.launch_thread(blockIdx_x, 4)
T.launch_thread(threadIdx_y, 8)
T.launch_thread(threadIdx_x, 8)
for ff_c_init, nn_c_init in T.grid(8, 8):
B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
B_local[6400000-1 + ff_c_init*8] = 0.0
for rc_outer, ry, rx in T.grid(32, 3, 3):
for ax3_inner_outer in T.serial(0, 2):
Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= blockIdx_z % 14 + rx and blockIdx_z % 14 + rx < 15,
A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
T.broadcast(T.float32(0), 4),
dtype="float32x4",
)
for rc_inner in T.serial(0, 8):
for ax3 in T.serial(0, 8):
Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
for ff_c, nn_c in T.grid(8, 8):
B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]
@tvm.script.ir_module
class Conv2dCuda2:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "T.noalias": True})
threadIdx_x = T.env_thread("threadIdx.x")
threadIdx_y = T.env_thread("threadIdx.y")
blockIdx_x = T.env_thread("blockIdx.x")
blockIdx_y = T.env_thread("blockIdx.y")
blockIdx_z = T.env_thread("blockIdx.z")
A = T.match_buffer(a, [14*14*256*256], dtype="float32")
B = T.match_buffer(b, [14*14*512*256], dtype="float32")
T.launch_thread(blockIdx_z, 196)
B_local = T.decl_buffer([64], "float32", scope="local")
Apad_shared = T.decl_buffer([512000], "float32", scope="shared")
Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
T.launch_thread(blockIdx_y, 8)
T.launch_thread(blockIdx_x, 4)
T.launch_thread(threadIdx_y, 8)
T.launch_thread(threadIdx_x, 8)
for ff_c_init, nn_c_init in T.grid(8, 8):
B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
for rc_outer, ry, rx in T.grid(32, 3, 3):
for ax3_inner_outer in T.serial(0, 2):
Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= blockIdx_z % 14 + rx and blockIdx_z % 14 + rx < 15,
A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
T.broadcast(T.float32(0), 4),
dtype="float32x4",
)
Apad_shared[512000-1] = 0.0
for rc_inner in T.serial(0, 8):
for ax3 in T.serial(0, 8):
Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
for ff_c, nn_c in T.grid(8, 8):
B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]
@tvm.script.ir_module
class Conv2dCuda3:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "T.noalias": True})
threadIdx_x = T.env_thread("threadIdx.x")
threadIdx_y = T.env_thread("threadIdx.y")
blockIdx_x = T.env_thread("blockIdx.x")
blockIdx_y = T.env_thread("blockIdx.y")
blockIdx_z = T.env_thread("blockIdx.z")
A = T.match_buffer(a, [14*14*256*256], dtype="float32")
B = T.match_buffer(b, [14*14*512*256], dtype="float32")
T.launch_thread(blockIdx_z, 196)
B_local = T.decl_buffer([64], "float32", scope="local")
Apad_shared = T.decl_buffer([512], "float32", scope="shared")
Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
T.launch_thread(blockIdx_y, 8)
T.launch_thread(blockIdx_x, 4)
T.launch_thread(threadIdx_y, 8)
T.launch_thread(threadIdx_x, 800000)
for ff_c_init, nn_c_init in T.grid(8, 8):
B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
for rc_outer, ry, rx in T.grid(32, 3, 3):
for ax3_inner_outer in T.serial(0, 2):
Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= blockIdx_z % 14 + rx and blockIdx_z % 14 + rx < 15,
A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
T.broadcast(T.float32(0), 4),
dtype="float32x4",
)
for rc_inner in T.serial(0, 8):
for ax3 in T.serial(0, 8):
Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
for ff_c, nn_c in T.grid(8, 8):
B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]
@T.prim_func
def GmmCuda0(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_fused in T.thread_binding(1, thread="vthread.x"): |
for i0_2_i1_2_i2_2_fused in T.thread_binding(128, thread="threadIdx.x"):
for i1_3_init, i2_4_init in T.grid(4, 2):
with T.block("Z_init"):
b = T.axis.spatial(1, 0)
i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3_init)
j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4_init)
T.reads()
T.writes(Z_local[b, i, j])
Z_local[b, i, j] = T.float32(0)
for i3_0 in T.serial(4):
for ax0_ax1_ax2_fused_0 in T.serial(4):
for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
for ax0_ax1_ax2_fused_2 in T.vectorized(2):
with T.block("X_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) // 32)
v2 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) % 32)
T.reads(X[v0, v1, v2])
T.writes(X_shared[v0, v1, v2])
X_shared[v0, v1, v2] = X[v0, v1, v2]
for ax0_ax1_ax2_fused_0 in T.serial(8):
for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
with T.block("Y_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) // 32)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) % 32)
T.reads(Y[v0, v1, v2])
T.writes(Y_shared[v0, v1, v2])
Y_shared[v0, v1, v2] = Y[v0, v1, v2]
for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(1, 1, 4, 1, 32, 1, 1, 2):
with T.block("Z_update"):
b = T.axis.spatial(1, 0)
i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3)
j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4)
k = T.axis.reduce(128, i3_0 * 32 + i3_2)
T.reads(Z_local[b, i, j], X_shared[b, i, k], Y_shared[b, k, j])
T.writes(Z_local[b, i, j])
Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
for ax0, ax1, ax2 in T.grid(1, 4, 2):
with T.block("Z_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + ax1)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + ax2)
T.reads(Z_local[v0, v1, v2])
T.writes(Z[v0, v1, v2])
Z[v0, v1, v2] = Z_local[v0, v1, v2]
@T.prim_func
def GmmCuda1(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_fused in T.thread_binding(1, thread="vthread.x"):
for i0_2_i1_2_i2_2_fused in T.thread_binding(128, thread="threadIdx.x"):
for i1_3_init, i2_4_init in T.grid(4, 2):
with T.block("Z_init"):
b = T.axis.spatial(1, 0)
i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3_init)
j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4_init)
T.reads()
T.writes(Z_local[b, i, j])
Z_local[b, i, j] = T.float32(0)
for i3_0 in T.serial(4):
for ax0_ax1_ax2_fused_0 in T.serial(4):
for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
for ax0_ax1_ax2_fused_2 in T.vectorized(2):
with T.block("X_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) // 32)
v2 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) % 32)
T.reads(X[v0, v1, v2])
T.writes(X_shared[v0, v1, v2])
X_shared[v0, v1, v2] = X[v0, v1, v2]
for ax0_ax1_ax2_fused_0 in T.serial(8):
for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
with T.block("Y_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) // 32)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) % 32)
T.reads(Y[v0, v1, v2])
T.writes(Y_shared[v0, v1, v2])
Y_shared[v0, v1, v2] = Y[v0, v1, v2]
for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(1, 1, 4, 1, 32, 1, 1, 2):
with T.block("Z_update"): |
b = T.axis.spatial(1, 0)
i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3)
j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4)
k = T.axis.reduce(128, i3_0 * 32 + i3_2)
T.block_attr({
"meta_schedule.thread_extent_low_inclusive": 0,
"meta_schedule.thread_extent_high_inclusive": 32,
})
T.reads(Z_local[b, i, j], X_shared[b, i, k], Y_shared[b, k, j])
T.writes(Z_local[b, i, j])
Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
for ax0, ax1, ax2 in T.grid(1, 4, 2):
with T.block("Z_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + ax1)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + ax2)
T.reads(Z_local[v0, v1, v2])
T.writes(Z[v0, v1, v2])
Z[v0, v1, v2] = Z_local[v0, v1, v2]
@T.prim_func
def GmmCuda2(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_fused in T.thread_binding(1, thread="vthread.x"):
for i0_2_i1_2_i2_2_fused in T.thread_binding(128, thread="threadIdx.x"):
for i1_3_init, i2_4_init in T.grid(4, 2):
with T.block("Z_init"): |
b = T.axis.spatial(1, 0)
i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3_init)
j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4_init)
T.reads()
T.writes(Z_local[b, i, j])
Z_local[b, i, j] = T.float32(0)
for i3_0 in T.serial(4):
for ax0_ax1_ax2_fused_0 in T.serial(4):
for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
for ax0_ax1_ax2_fused_2 in T.vectorized(2):
with T.block("X_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) // 32)
v2 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) % 32)
T.reads(X[v0, v1, v2])
T.writes(X_shared[v0, v1, v2])
X_shared[v0, v1, v2] = X[v0, v1, v2]
for ax0_ax1_ax2_fused_0 in T.serial(8):
for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
with T.block("Y_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) // 32)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) % 32)
T.reads(Y[v0, v1, v2])
T.writes(Y_shared[v0, v1, v2])
Y_shared[v0, v1, v2] = Y[v0, v1, v2]
for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(1, 1, 4, 1, 32, 1, 1, 2):
with T.block("Z_update"):
b = T.axis.spatial(1, 0)
i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3)
j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4)
k = T.axis.reduce(128, i3_0 * 32 + i3_2)
T.block_attr({
"meta_schedule.thread_extent_low_inclusive": 1024,
"meta_schedule.thread_extent_high_inclusive": 1024,
})
T.reads(Z_local[b, i, j], X_shared[b, i, k], Y_shared[b, k, j])
T.writes(Z_local[b, i, j])
Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
for ax0, ax1, ax2 in T.grid(1, 4, 2):
with T.block("Z_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + ax1)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + ax2)
T.reads(Z_local[v0, v1, v2])
T.writes(Z[v0, v1, v2])
Z[v0, v1, v2] = Z_local[v0, v1, v2]
@T.prim_func
def GMMCUDATensorCore(
X: T.Buffer[(1024, 1024), "float16"],
Y: T.Buffer[(1024, 1024), "float16"],
Z: T.Buffer[(1024, 1024), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
s0 = T.var("int32")
s0_1 = T.var("int32")
s0_2 = T.var("int32")
s1 = T.var("int32")
s1_1 = T.var("int32")
s1_2 = T.var("int32")
Z_wmma_accumulator = T.alloc_buffer([1024, 1024], dtype="float32", scope="wmma.accumulator")
X_shared = T.alloc_buffer([1024, 1024], dtype="float16", scope="shared")
Y_shared = T.alloc_buffer([1024, 1024], dtype="float16", scope="shared")
X_shared_wmma_matrix_a = T.alloc_buffer([1024, 1024], dtype="float16", scope="wmma.matrix_a")
Y_shared_wmma_matrix_b = T.alloc_buffer([1024, 1024], dtype="float16", scope="wmma.matrix_b")
for ax0_0_ax1_0_0_ax2_0_0_fused in T.thread_binding(64, thread="blockIdx.x"):
for ax0_1_ax1_0_1_ax2_0_1_fused in T.thread_binding(2, thread="blockIdx.y"):
for ax0_2_ax1_0_2_ax2_0_2_fused in T.thread_binding(2, thread="threadIdx.y"):
for ax1_0_3_init, ax2_0_3_init, ax1_0_4_init, ax2_0_4_init in T.grid(2, 1, 2, 4):
with T.block("Z_o_init"):
v0 = T.axis.spatial(1, 0)
v1_o = T.axis.spatial(
64,
                            ax0_0_ax1_0_0_ax2_0_0_fused % 64 // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused % 2 * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused % 2 * 4
+ ax1_0_3_init * 2
+ ax1_0_4_init,
)
v2_o = T.axis.spatial(
64,
(ax0_0_ax1_0_0_ax2_0_0_fused % 16 + 0 + 0 + ax2_0_3_init) * 4
+ ax2_0_4_init,
)
T.reads()
T.writes(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
]
)
T.block_attr(
{
"meta_schedule.thread_extent_high_inclusive": 1024,
"meta_schedule.thread_extent_low_inclusive": 32,
"warp_execution": 1,
}
)
C = T.match_buffer(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
], |
[16, 16],
dtype="float32",
scope="wmma.accumulator",
offset_factor=16,
)
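                        # Fragment index convention used by the wmma intrinsics below:
                        # elem_offset // 256 selects a 16x16 (256-element) fragment, and
                        # elem_offset % 256 // 16 accounts for offsets within a tile row.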
T.evaluate(
T.tvm_fill_fragment(
C.data,
16,
16,
16,
                                C.elem_offset // 256 + C.elem_offset % 256 // 16,
T.float32(0),
dtype="handle",
)
)
for ax3_0_0 in T.serial(32):
for ax0_ax1_fused_0 in T.serial(16):
for ax0_ax1_fused_1 in T.thread_binding(2, thread="threadIdx.y"):
for ax0_ax1_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("X_shared"):
v0 = T.axis.spatial(
1024,
                                            ax0_0_ax1_0_0_ax2_0_0_fused // 16 * 256
+ ax0_1_ax1_0_1_ax2_0_1_fused * 128
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
                                            )
                                            // 32,
                                        )
v1 = T.axis.spatial(
1024,
ax3_0_0 * 32
+ (
ax0_ax1_fused_0 * 256 |
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
)
% 32,
)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]]})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused_0 in T.serial(8):
for ax0_ax1_fused_1 in T.thread_binding(2, thread="threadIdx.y"):
for ax0_ax1_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("Y_shared"):
v0 = T.axis.spatial(
1024,
ax3_0_0 * 32
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
                                            )
                                            // 32,
                                        )
v1 = T.axis.spatial(
1024,
ax0_0_ax1_0_0_ax2_0_0_fused % 16 * 64
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128 |
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
)
% 64,
)
T.reads(Y[v0, v1])
T.writes(Y_shared[v0, v1])
T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]]})
Y_shared[v0, v1] = Y[v0, v1]
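                    # Inner stage: copy 16x16 tiles from shared memory into
                    # wmma.matrix_a / wmma.matrix_b fragments before the mma loop.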
for ax3_0_1 in T.serial(2):
for ax0_0, ax1_0 in T.grid(4, 1):
with T.block("X_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(
64,
                                    ax0_0_ax1_0_0_ax2_0_0_fused // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused * 4
+ ax0_0,
)
v1_o = T.axis.spatial(64, ax3_0_0 * 2 + ax3_0_1)
T.reads(
X_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]
)
T.writes(
X_shared_wmma_matrix_a[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
]
)
A = T.match_buffer(
X_shared[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
strides=[s1, s0],
scope="shared", |
offset_factor=16,
)
C_1 = T.match_buffer(
X_shared_wmma_matrix_a[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_a",
offset_factor=16,
)
T.evaluate(
T.tvm_load_matrix_sync(
C_1.data,
16,
16,
16,
                                        C_1.elem_offset // 256 + C_1.elem_offset % 256 // 16,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A.data,
A.elem_offset,
s1 * 16,
1,
dtype="handle",
),
s1,
"row_major",
dtype="handle",
)
)
for ax0_0, ax1_0 in T.grid(1, 4):
with T.block("Y_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(64, ax3_0_0 * 2 + ax3_0_1)
v1_o = T.axis.spatial(
64, ax0_0_ax1_0_0_ax2_0_0_fused % 16 * 4 + ax1_0
)
T.reads( |
Y_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]
)
T.writes(
Y_shared_wmma_matrix_b[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
]
)
A_1 = T.match_buffer(
Y_shared[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
strides=[s1_1, s0_1],
scope="shared",
offset_factor=16,
)
C_2 = T.match_buffer(
Y_shared_wmma_matrix_b[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_b",
offset_factor=16,
)
T.evaluate(
T.tvm_load_matrix_sync(
C_2.data,
16,
16,
16,
                                        C_2.elem_offset // 256 + C_2.elem_offset % 256 // 16,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A_1.data,
A_1.elem_offset,
                                            s1_1 * 16,
1,
dtype="handle",
),
s1_1,
"row_major",
dtype="handle",
)
)
for ax0_3, ax1_0_3, ax2_0_3, ax3_0_2, ax0_4, ax1_0_4, ax2_0_4 in T.grid(
1, 2, 1, 1, 1, 2, 4
):
with T.block("Z_o_update"):
v0 = T.axis.spatial(1, 0)
v1_o = T.axis.spatial(
64,
                                    ax0_0_ax1_0_0_ax2_0_0_fused % 64 // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused % 2 * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused % 2 * 4
+ ax1_0_3 * 2
+ ax1_0_4,
)
v2_o = T.axis.spatial(
64,
(ax0_0_ax1_0_0_ax2_0_0_fused % 16 + 0 + 0 + ax2_0_3) * 4
+ ax2_0_4,
)
v3_o = T.axis.reduce(64, ax3_0_0 * 2 + ax3_0_1 + ax3_0_2)
T.reads(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
X_shared_wmma_matrix_a[
v1_o * 16 : v1_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16
],
Y_shared_wmma_matrix_b[
                                        v3_o * 16 : v3_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
)
T.writes(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
]
)
T.block_attr(
{
"meta_schedule.thread_extent_high_inclusive": 1024,
"meta_schedule.thread_extent_low_inclusive": 32,
"warp_execution": 1,
}
)
A_2 = T.match_buffer(
X_shared_wmma_matrix_a[
v1_o * 16 : v1_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_a",
offset_factor=16,
)
B = T.match_buffer(
Y_shared_wmma_matrix_b[
v3_o * 16 : v3_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_b",
offset_factor=16,
)
C_3 = T.match_buffer(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
[16, 16], |
dtype="float32",
scope="wmma.accumulator",
offset_factor=16,
)
                                T.evaluate(
                                    T.tvm_mma_sync(
                                        C_3.data,
                                        C_3.elem_offset // 256 + C_3.elem_offset % 256 // 16,
                                        A_2.data,
                                        A_2.elem_offset // 256 + A_2.elem_offset % 256 // 16,
                                        B.data,
                                        B.elem_offset // 256 + B.elem_offset % 256 // 16,
                                        C_3.data,
                                        C_3.elem_offset // 256 + C_3.elem_offset % 256 // 16,
                                        dtype="handle",
                                    )
                                )
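                # Epilogue: store the accumulator fragments back to global Z,
                # one 16x16 tile per iteration, via T.tvm_store_matrix_sync.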
for ax0_0, ax1_0 in T.grid(4, 4):
with T.block("Z_wmma.accumulator_o"):
v0_o = T.axis.spatial(
64,
                            ax0_0_ax1_0_0_ax2_0_0_fused // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused * 4
+ ax0_0,
)
v1_o = T.axis.spatial(64, ax0_0_ax1_0_0_ax2_0_0_fused % 16 * 4 + ax1_0)
T.reads(
Z_wmma_accumulator[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
]
)
T.writes(Z[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
A_3 = T.match_buffer(
Z_wmma_accumulator[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float32",
scope="wmma.accumulator",
                            offset_factor=16,
)
C_4 = T.match_buffer(
Z[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16],
[16, 16],
dtype="float32",
strides=[s1_2, s0_2],
offset_factor=16,
)
T.evaluate(
T.tvm_store_matrix_sync(
A_3.data,
16,
16,
16,
                                A_3.elem_offset // 256 + A_3.elem_offset % 256 // 16,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
C_4.data,
C_4.elem_offset,
s1_2 * 16,
2,
dtype="handle",
),
s1_2,
"row_major",
dtype="handle",
)
)
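# The kernels split into two groups: those whose launch configuration is
# consistent with the target limits (and with their own thread-extent block
# attributes) must pass the verification postproc, while the remaining
# variants each violate at least one constraint and must be rejected.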
@pytest.mark.parametrize("mod", [Conv2dCuda0, Conv2dCuda1, GmmCuda0, GMMCUDATensorCore])
def test_postproc_check_pass(mod):
ctx = _create_context(mod, target=_target())
sch = tir.Schedule(mod, debug_mask="all")
assert ctx.space_generator.postprocs[0].apply(sch)
@pytest.mark.parametrize(
"mod",
[
Conv2dCuda2,
Conv2dCuda3,
GmmCuda1,
GmmCuda2,
],
)
def test_postproc_check_fail(mod):
ctx = _create_context(mod, target=_target())
sch = tir.Schedule(mod, debug_mask="all")
assert not ctx.space_generator.postprocs[0].apply(sch)
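# A minimal standalone sketch of the same check outside pytest, assuming the
# postproc under test is ms.postproc.VerifyGPUCode (as constructed by this
# file's _create_context helper):
#
#     ctx = _create_context(GmmCuda0, target=_target())
#     sch = tir.Schedule(GmmCuda0, debug_mask="all")
#     ok = ctx.space_generator.postprocs[0].apply(sch)  # True iff the kernel fits the target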
if __name__ == "__main__":
    tvm.testing.main()
""" Test Meta Schedule Profiler """
import time
from tvm import meta_schedule as ms
def test_meta_schedule_profiler_context_manager():
with ms.Profiler() as profiler:
time.sleep(1)
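        # 1s is attributed only to "Total"; the nested scopes below contribute
        # ~3s to "Level0" and ~2s to "Level1", matching the assertions further down.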
with ms.Profiler.timeit("Level0"):
time.sleep(1)
with ms.Profiler.timeit("Level1"):
time.sleep(2)
    # Results are in seconds; nested scopes accumulate, so "Level0" includes
    # the 2s spent inside "Level1".
    result = profiler.get()
    assert len(result) == 3  # "Total", "Level0", "Level1"
assert 3.9 <= result["Total"] <= 4.1
assert 2.9 <= result["Level0"] <= 3.1
assert 1.9 <= result["Level1"] <= 2.1
def test_meta_schedule_no_context():
with ms.Profiler.timeit("Level0"):
assert ms.Profiler.current() is None
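# Profiler.timeit degrades gracefully: with no enclosing Profiler, the scope
# above records nothing, which is what the assertion relies on.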
if __name__ == "__main__":
test_meta_schedule_profiler_context_manager()
test_meta_schedule_no_context()
"""Integration test for MetaSchedule""" |
import tempfile
from typing |
import List |
import numpy as np |
import pytest |
import tvm |
import tvm.testing
from tvm |
import IRModule
from tvm |
import meta_schedule as ms
from tvm |
import relay, te, tir
from tvm._ffi |
import register_func
from tvm.contrib |
import graph_executor
from tvm.ir.transform |
import PassContext
from tvm.meta_schedule.database |
import TuningRecord, Workload
from tvm.meta_schedule.testing.relay_workload |
import get_network
from tvm.meta_schedule.testing.tlcbench |
import load_quantized_bert_base
from tvm.meta_schedule.tune_context |
import _normalize_mod
from tvm.script |
import tir as T
from tvm.target |