vi1_0, i0_4, vi1_1 = T.axis.remap("SSR", [i1_0, i0_3, i1_1])
T.reads(A[i0_4, vi1_0 * 16 + vi1_1], T_softmax_maxelem[i0_4])
T.writes(T_softmax_expsum_rf[i0_4, vi1_0])
with T.init():
T_softmax_expsum_rf[i0_4, vi1_0] = T.float32(0)
T_softmax_expsum_rf[i0_4, vi1_0] = T_softmax_expsum_rf[i0_4, vi1_0] + T.exp(A[i0_4, vi1_0 * 16 + vi1_1] - T_softmax_maxelem[i0_4], dtype="float32")
for i0_5, i1 in T.grid(256, 256):
for ax0, ax1 in T.grid(16, 1):
with T.block("T_softmax_expsum"):
vi1_0 = T.axis.reduce(16, ax0)
i0_6 = T.axis.spatial(256, i0_5 + ax1)
T.reads(T_softmax_expsum_rf[i0_6, vi1_0])
T.writes(T_softmax_expsum[i0_6])
with T.init():
T_softmax_expsum[i0_6] = T.float32(0)
T_softmax_expsum[i0_6] = T_softmax_expsum[i0_6] + T_softmax_expsum_rf[i0_6, vi1_0]
with T.block("T_softmax_norm"):
i0_7, i1_2 = T.axis.remap("SS", [i0_5, i1])
T.reads(A[i0_7, i1_2], T_softmax_maxelem[i0_7], T_softmax_expsum[i0_7])
T.writes(T_softmax_norm[i0_7, i1_2])
T.block_attr({"axis":1})
T_softmax_norm[i0_7, i1_2] = T.exp(A[i0_7, i1_2] - T_softmax_maxelem[i0_7], dtype="float32") / T_softmax_expsum[i0_7]
@T.prim_func
def sfm_1(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":16, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_rf = T.alloc_buffer([256, 16], dtype="float32")
T_softmax_maxelem_rf = T.alloc_buffer([256, 64], dtype="float32")
for i0 in T.serial(256):
for ax0, ax1, ax2 in T.grid(64, 1, 4):
with T.block("T_softmax_maxelem_rf"):
vi1_1 = T.axis.spatial(64, ax0)
i0_1 = T.axis.spatial(256, i0 + ax1)
vi1_0 = T.axis.reduce(4, ax2)
T.reads(A[i0_1, vi1_0 * 64 + vi1_1])
T.writes(T_softmax_maxelem_rf[i0_1, vi1_1])
with T.init():
T_softmax_maxelem_rf[i0_1, vi1_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_rf[i0_1, vi1_1] = T.max(T_softmax_maxelem_rf[i0_1, vi1_1], A[i0_1, vi1_0 * 64 + vi1_1])
for i1 in T.serial(256):
for ax0, ax1 in T.grid(64, 1):
with T.block("T_softmax_maxelem"):
vi1_1 = T.axis.reduce(64, ax0)
i0_2 = T.axis.spatial(256, i0 + ax1)
T.reads(T_softmax_maxelem_rf[i0_2, vi1_1])
T.writes(T_softmax_maxelem[i0_2])
with T.init():
T_softmax_maxelem[i0_2] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_2] = T.max(T_softmax_maxelem[i0_2], T_softmax_maxelem_rf[i0_2, vi1_1])
with T.block("T_softmax_exp"):
i0_3, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(A[i0_3, i1_1], T_softmax_maxelem[i0_3])
T.writes(T_softmax_exp[i0_3, i1_1])
T_softmax_exp[i0_3, i1_1] = T.exp(A[i0_3, i1_1] - T_softmax_maxelem[i0_3], dtype="float32")
for i0_4, i1_0, i1_1_1 in T.grid(256, 16, 16):
with T.block("T_softmax_expsum_rf"):
vi1_0, i0_5, vi1_1 = T.axis.remap("SSR", [i1_0, i0_4, i1_1_1])
T.reads(T_softmax_exp[i0_5, vi1_0 * 16 + vi1_1])
T.writes(T_softmax_expsum_rf[i0_5, vi1_0])
with T.init():
T_softmax_expsum_rf[i0_5, vi1_0] = T.float32(0)
T_softmax_expsum_rf[i0_5, vi1_0] = T_softmax_expsum_rf[i0_5, vi1_0] + T_softmax_exp[i0_5, vi1_0 * 16 + vi1_1]
for i0_6, i1_0 in T.grid(256, 16):
with T.block("T_softmax_expsum"):
vi1_0, i0_7 = T.axis.remap("RS", [i1_0, i0_6])
T.reads(T_softmax_expsum_rf[i0_7, vi1_0])
T.writes(T_softmax_expsum[i0_7])
with T.init():
T_softmax_expsum[i0_7] = T.float32(0)
T_softmax_expsum[i0_7] = T_softmax_expsum[i0_7] + T_softmax_expsum_rf[i0_7, vi1_0]
for i0_8, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_9, i1_2 = T.axis.remap("SS", [i0_8, i1])
T.reads(T_softmax_exp[i0_9, i1_2], T_softmax_expsum[i0_9])
T.writes(T_softmax_norm[i0_9, i1_2])
T.block_attr({"axis":1})
T_softmax_norm[i0_9, i1_2] = T_softmax_exp[i0_9, i1_2] / T_softmax_expsum[i0_9]
@T.prim_func
def sfm_2(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_rf = T.alloc_buffer([256, 16], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1_0, i1_1 in T.grid(256, 16, 16):
with T.block("T_softmax_expsum_rf"):
vi1_0, i0_2, vi1_1 = T.axis.remap("SSR", [i1_0, i0, i1_1])
T.reads(A[i0_2, vi1_0 * 16 + vi1_1], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum_rf[i0_2, vi1_0])
with T.init():
T_softmax_expsum_rf[i0_2, vi1_0] = T.float32(0)
T_softmax_expsum_rf[i0_2, vi1_0] = T_softmax_expsum_rf[i0_2, vi1_0] + T.exp(A[i0_2, vi1_0 * 16 + vi1_1] - T_softmax_maxelem[i0_2], dtype="float32")
for i0_3, i1_0 in T.grid(256, 16):
with T.block("T_softmax_expsum"):
vi1_0, i0_4 = T.axis.remap("RS", [i1_0, i0_3])
T.reads(T_softmax_expsum_rf[i0_4, vi1_0])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_expsum_rf[i0_4, vi1_0]
for i0_5, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
T.reads(A[i0_6, i1_2], T_softmax_maxelem[i0_6], T_softmax_expsum[i0_6])
T.writes(T_softmax_norm[i0_6, i1_2])
T.block_attr({"axis":1})
T_softmax_norm[i0_6, i1_2] = T.exp(A[i0_6, i1_2] - T_softmax_maxelem[i0_6], dtype="float32") / T_softmax_expsum[i0_6]
@T.prim_func
def sfm_3(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_rf = T.alloc_buffer([256, 16], dtype="float32")
T_softmax_maxelem_rf = T.alloc_buffer([256, 256], dtype="float32")
for i0, i1 in T.grid(256, 256):
for ax0, ax1, ax2 in T.grid(256, 1, 1):
with T.block("T_softmax_maxelem_rf"):
vi1_0 = T.axis.spatial(256, ax0)
i0_1 = T.axis.spatial(256, i0 + ax1)
vi1_1 = T.axis.reduce(1, ax2)
T.reads(A[i0_1, vi1_1 + vi1_0])
T.writes(T_softmax_maxelem_rf[i0_1, vi1_0])
with T.init():
T_softmax_maxelem_rf[i0_1, vi1_0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_rf[i0_1, vi1_0] = T.max(T_softmax_maxelem_rf[i0_1, vi1_0], A[i0_1, vi1_1 + vi1_0])
for ax0, ax1 in T.grid(256, 1):
with T.block("T_softmax_maxelem"):
vi1_0 = T.axis.reduce(256, ax0)
i0_2 = T.axis.spatial(256, i0 + ax1)
T.reads(T_softmax_maxelem_rf[i0_2, vi1_0])
T.writes(T_softmax_maxelem[i0_2])
with T.init():
T_softmax_maxelem[i0_2] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_2] = T.max(T_softmax_maxelem[i0_2], T_softmax_maxelem_rf[i0_2, vi1_0])
for ax0, ax1 in T.grid(1, 256):
with T.block("T_softmax_exp"):
i0_3 = T.axis.spatial(256, i0 + ax0)
i1_1 = T.axis.spatial(256, ax1)
T.reads(A[i0_3, i1_1], T_softmax_maxelem[i0_3])
T.writes(T_softmax_exp[i0_3, i1_1])
T_softmax_exp[i0_3, i1_1] = T.exp(A[i0_3, i1_1] - T_softmax_maxelem[i0_3], dtype="float32")
for ax0 in T.serial(16):
for ax0_1, ax1, ax2 in T.grid(1, 1, 16):
with T.block("T_softmax_expsum_rf"):
vi1_1 = T.axis.spatial(16, ax0 + ax0_1)
i0_4 = T.axis.spatial(256, i0 + ax1)
vi1_0 = T.axis.reduce(16, ax2)
T.reads(T_softmax_exp[i0_4, vi1_0 * 16 + vi1_1])
T.writes(T_softmax_expsum_rf[i0_4, vi1_1])
with T.init():
T_softmax_expsum_rf[i0_4, vi1_1] = T.float32(0)
T_softmax_expsum_rf[i0_4, vi1_1] = T_softmax_expsum_rf[i0_4, vi1_1] + T_softmax_exp[i0_4, vi1_0 * 16 + vi1_1]
for ax1 in T.serial(1):
with T.block("T_softmax_expsum"):
vi1_1 = T.axis.reduce(16, ax0)
i0_5 = T.axis.spatial(256, i0 + ax1)
T.reads(T_softmax_expsum_rf[i0_5, vi1_1])
T.writes(T_softmax_expsum[i0_5])
with T.init():
T_softmax_expsum[i0_5] = T.float32(0)
T_softmax_expsum[i0_5] = T_softmax_expsum[i0_5] + T_softmax_expsum_rf[i0_5, vi1_1]
with T.block("T_softmax_norm"):
i0_6, i1_2 = T.axis.remap("SS", [i0, i1])
T.reads(T_softmax_exp[i0_6, i1_2], T_softmax_expsum[i0_6])
T.writes(T_softmax_norm[i0_6, i1_2])
T.block_attr({"axis":1})
T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
@T.prim_func
def sfm_4(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_rf = T.alloc_buffer([256, 16], dtype="float32")
T_softmax_maxelem_rf = T.alloc_buffer([256, 1], dtype="float32")
for i0 in T.serial(256):
for ax0, ax1, ax2 in T.grid(1, 1, 256):
with T.block("T_softmax_maxelem_rf"):
vi1_1 = T.axis.spatial(1, ax0)
i0_1 = T.axis.spatial(256, i0 + ax1)
vi1_0 = T.axis.reduce(256, ax2)
T.reads(A[i0_1, vi1_1 + vi1_0])
T.writes(T_softmax_maxelem_rf[i0_1, vi1_1])
with T.init():
T_softmax_maxelem_rf[i0_1, vi1_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_rf[i0_1, vi1_1] = T.max(T_softmax_maxelem_rf[i0_1, vi1_1], A[i0_1, vi1_1 + vi1_0])
for i1_1 in T.serial(1):
with T.block("T_softmax_maxelem"):
vi1_1, i0_2 = T.axis.remap("RS", [i1_1, i0])
T.reads(T_softmax_maxelem_rf[i0_2, vi1_1])
T.writes(T_softmax_maxelem[i0_2])
with T.init():
T_softmax_maxelem[i0_2] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_2] = T.max(T_softmax_maxelem[i0_2], T_softmax_maxelem_rf[i0_2, vi1_1])
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_exp"):
i0_4, i1_2 = T.axis.remap("SS", [i0_3, i1])
T.reads(A[i0_4, i1_2], T_softmax_maxelem[i0_4])
T.writes(T_softmax_exp[i0_4, i1_2])
T_softmax_exp[i0_4, i1_2] = T.exp(A[i0_4, i1_2] - T_softmax_maxelem[i0_4], dtype="float32")
for i0_5, i1_0, i1_1 in T.grid(256, 16, 16):
with T.block("T_softmax_expsum_rf"):
vi1_1, i0_6, vi1_0 = T.axis.remap("SSR", [i1_1, i0_5, i1_0])
T.reads(T_softmax_exp[i0_6, vi1_0 * 16 + vi1_1])
T.writes(T_softmax_expsum_rf[i0_6, vi1_1])
with T.init():
T_softmax_expsum_rf[i0_6, vi1_1] = T.float32(0)
T_softmax_expsum_rf[i0_6, vi1_1] = T_softmax_expsum_rf[i0_6, vi1_1] + T_softmax_exp[i0_6, vi1_0 * 16 + vi1_1]
for i0_7, i1_1 in T.grid(256, 16):
with T.block("T_softmax_expsum"):
vi1_1, i0_8 = T.axis.remap("RS", [i1_1, i0_7])
T.reads(T_softmax_expsum_rf[i0_8, vi1_1])
T.writes(T_softmax_expsum[i0_8])
with T.init():
T_softmax_expsum[i0_8] = T.float32(0)
T_softmax_expsum[i0_8] = T_softmax_expsum[i0_8] + T_softmax_expsum_rf[i0_8, vi1_1]
for i0_9, i1_3 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_10, i1_4 = T.axis.remap("SS", [i0_9, i1_3])
T.reads(T_softmax_exp[i0_10, i1_4], T_softmax_expsum[i0_10])
T.writes(T_softmax_norm[i0_10, i1_4])
T.block_attr({"axis":1})
T_softmax_norm[i0_10, i1_4] = T_softmax_exp[i0_10, i1_4] / T_softmax_expsum[i0_10]
@T.prim_func
def sfm_5(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_rf = T.alloc_buffer([256, 16], dtype="float32")
for i0 in T.serial(256):
for ax0, ax1 in T.grid(1, 256):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0 + ax0)
k = T.axis.reduce(256, ax1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for ax0, ax1 in T.grid(1, 256):
with T.block("T_softmax_exp"):
i0_2 = T.axis.spatial(256, i0 + ax0)
i1 = T.axis.spatial(256, ax1)
T.reads(A[i0_2, i1], T_softmax_maxelem[i0_2])
T.writes(T_softmax_exp[i0_2, i1])
T_softmax_exp[i0_2, i1] = T.exp(A[i0_2, i1] - T_softmax_maxelem[i0_2], dtype="float32")
for ax0 in T.serial(16):
for ax0_1, ax1, ax2 in T.grid(1, 1, 16):
with T.block("T_softmax_expsum_rf"):
vi1_1 = T.axis.spatial(16, ax0 + ax0_1)
i0_3 = T.axis.spatial(256, i0 + ax1)
vi1_0 = T.axis.reduce(16, ax2)
T.reads(T_softmax_exp[i0_3, vi1_0 * 16 + vi1_1])
T.writes(T_softmax_expsum_rf[i0_3, vi1_1])
with T.init():
T_softmax_expsum_rf[i0_3, vi1_1] = T.float32(0)
T_softmax_expsum_rf[i0_3, vi1_1] = T_softmax_expsum_rf[i0_3, vi1_1] + T_softmax_exp[i0_3, vi1_0 * 16 + vi1_1]
for ax1 in T.serial(1):
with T.block("T_softmax_expsum"):
vi1_1 = T.axis.reduce(16, ax0)
i0_4 = T.axis.spatial(256, i0 + ax1)
T.reads(T_softmax_expsum_rf[i0_4, vi1_1])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_expsum_rf[i0_4, vi1_1]
for i1 in T.serial(256):
with T.block("T_softmax_norm"):
i0_5, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(T_softmax_exp[i0_5, i1_1], T_softmax_expsum[i0_5])
T.writes(T_softmax_norm[i0_5, i1_1])
T.block_attr({"axis":1})
T_softmax_norm[i0_5, i1_1] = T_softmax_exp[i0_5, i1_1] / T_softmax_expsum[i0_5]
@T.prim_func
def sfm_6(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_maxelem_rf = T.alloc_buffer([256, 64], dtype="float32")
for i0 in T.serial(256):
for ax0, ax1, ax2 in T.grid(64, 1, 4):
with T.block("T_softmax_maxelem_rf"):
vi1_0 = T.axis.spatial(64, ax0)
i0_1 = T.axis.spatial(256, i0 + ax1)
vi1_1 = T.axis.reduce(4, ax2)
T.reads(A[i0_1, vi1_0 * 4 + vi1_1])
T.writes(T_softmax_maxelem_rf[i0_1, vi1_0])
with T.init():
T_softmax_maxelem_rf[i0_1, vi1_0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_rf[i0_1, vi1_0] = T.max(T_softmax_maxelem_rf[i0_1, vi1_0], A[i0_1, vi1_0 * 4 + vi1_1])
for i1_0 in T.serial(64):
with T.block("T_softmax_maxelem"):
vi1_0, i0_2 = T.axis.remap("RS", [i1_0, i0])
T.reads(T_softmax_maxelem_rf[i0_2, vi1_0])
T.writes(T_softmax_maxelem[i0_2])
with T.init():
T_softmax_maxelem[i0_2] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_2] = T.max(T_softmax_maxelem[i0_2], T_softmax_maxelem_rf[i0_2, vi1_0])
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_4, k = T.axis.remap("SR", [i0_3, i1])
T.reads(A[i0_4, k], T_softmax_maxelem[i0_4])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T.exp(A[i0_4, k] - T_softmax_maxelem[i0_4], dtype="float32")
for i0_5, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_6, i1_1 = T.axis.remap("SS", [i0_5, i1])
T.reads(A[i0_6, i1_1], T_softmax_maxelem[i0_6], T_softmax_expsum[i0_6])
T.writes(T_softmax_norm[i0_6, i1_1])
T.block_attr({"axis":1})
T_softmax_norm[i0_6, i1_1] = T.exp(A[i0_6, i1_1] - T_softmax_maxelem[i0_6], dtype="float32") / T_softmax_expsum[i0_6]
@T.prim_func
def sfm_7(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
T_softmax_maxelem_rf = T.alloc_buffer([256, 4], dtype="float32")
for i0, i1_0, i1_1 in T.grid(256, 64, 4):
with T.block("T_softmax_maxelem_rf"):
vi1_1, i0_1, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(A[i0_1, vi1_0 * 4 + vi1_1])
T.writes(T_softmax_maxelem_rf[i0_1, vi1_1])
with T.init():
T_softmax_maxelem_rf[i0_1, vi1_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_rf[i0_1, vi1_1] = T.max(T_softmax_maxelem_rf[i0_1, vi1_1], A[i0_1, vi1_0 * 4 + vi1_1])
for i0, i1_1 in T.grid(256, 4):
with T.block("T_softmax_maxelem"):
vi1_1, i0_2 = T.axis.remap("RS", [i1_1, i0])
T.reads(T_softmax_maxelem_rf[i0_2, vi1_1])
T.writes(T_softmax_maxelem[i0_2])
with T.init():
T_softmax_maxelem[i0_2] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_2] = T.max(T_softmax_maxelem[i0_2], T_softmax_maxelem_rf[i0_2, vi1_1])
for i0_3, i1 in T.grid(256, 256):
for ax0, ax1 in T.grid(1, 256):
with T.block("T_softmax_expsum"):
i0_4 = T.axis.spatial(256, i0_3 + ax0)
k = T.axis.reduce(256, ax1)
T.reads(A[i0_4, k], T_softmax_maxelem[i0_4])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T.exp(A[i0_4, k] - T_softmax_maxelem[i0_4], dtype="float32")
with T.block("T_softmax_norm"):
i0_5, i1_2 = T.axis.remap("SS", [i0_3, i1])
T.reads(A[i0_5, i1_2], T_softmax_maxelem[i0_5], T_softmax_expsum[i0_5])
T.writes(T_softmax_norm[i0_5, i1_2])
T.block_attr({"axis":1})
T_softmax_norm[i0_5, i1_2] = T.exp(A[i0_5, i1_2] - T_softmax_maxelem[i0_5], dtype="float32") / T_softmax_expsum[i0_5]
@T.prim_func
def sfm_8(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0 in T.serial(256):
for ax0, ax1 in T.grid(1, 256):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0 + ax0)
k = T.axis.reduce(256, ax1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i1 in T.serial(256):
with T.block("T_softmax_exp"):
i0_2, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(A[i0_2, i1_1], T_softmax_maxelem[i0_2])
T.writes(T_softmax_exp[i0_2, i1_1])
T_softmax_exp[i0_2, i1_1] = T.exp(A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32")
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_4, k = T.axis.remap("SR", [i0_3, i1])
T.reads(T_softmax_exp[i0_4, k])
T.writes(T_softmax_expsum[i0_4])
with T.init():
T_softmax_expsum[i0_4] = T.float32(0)
T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k]
for i0_5, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
T.reads(T_softmax_exp[i0_6, i1_2], T_softmax_expsum[i0_6])
T.writes(T_softmax_norm[i0_6, i1_2])
T.block_attr({"axis":1})
T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
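# Each decision_i below replays the sampled choices that produce the
# corresponding sketch sfm_i: "SamplePerfectTile" records loop-split factors,
# "SampleCategorical" picks one of the tuned options (e.g. the unroll depth),
# and "SampleComputeLocation" chooses where a block is computed-at (in TVM's
# convention, -1 keeps the block at the root, -2 inlines it, and other values
# are loop indices).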
decision_0 = [
("SamplePerfectTile", [16, 16]),
("SamplePerfectTile", [4, 64]),
("SampleCategorical", 0),
("SampleComputeLocation", 1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -2),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
]
decision_1 = [
("SamplePerfectTile", [16, 16]),
("SamplePerfectTile", [4, 64]),
("SampleCategorical", 1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", 1),
("SampleComputeLocation", 0),
]
decision_2 = [
("SamplePerfectTile", [16, 16]),
("SampleCategorical", 3),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -2),
("SampleComputeLocation", -1),
]
decision_3 = [
("SamplePerfectTile", [16, 16]),
("SamplePerfectTile", [256, 1]),
("SampleCategorical", 3),
("SampleComputeLocation", 1),
("SampleComputeLocation", 2),
("SampleComputeLocation", 1),
("SampleComputeLocation", 1),
("SampleComputeLocation", 1),
]
decision_4 = [
("SamplePerfectTile", [16, 16]),
("SamplePerfectTile", [256, 1]),
("SampleCategorical", 0),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", 0),
]
decision_5 = [
("SamplePerfectTile", [16, 16]),
("SampleCategorical", 3),
("SampleComputeLocation", 0),
("SampleComputeLocation", 1),
("SampleComputeLocation", 0),
("SampleComputeLocation", 0),
]
decision_6 = [
("SamplePerfectTile", [64, 4]),
("SampleCategorical", 2),
("SampleComputeLocation", -1),
("SampleComputeLocation", -2),
("SampleComputeLocation", -1),
("SampleComputeLocation", 0),
]
decision_7 = [
("SamplePerfectTile", [64, 4]),
("SampleCategorical", 2),
("SampleComputeLocation", 1),
("SampleComputeLocation", -2),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
]
decision_8 = [
("SampleCategorical", 3),
("SampleComputeLocation", -1),
("SampleComputeLocation", -1),
("SampleComputeLocation", 0),
]
mod = create_te_workload("SFM", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[sfm_0, sfm_1, sfm_2, sfm_3, sfm_4, sfm_5, sfm_6, sfm_7, sfm_8],
expected_decisions=[
decision_0,
decision_1,
decision_2,
decision_3,
decision_4,
decision_5,
decision_6,
decision_7,
decision_8,
],
)
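# CBR = Conv2d + bias + batch-norm scale/offset + ReLU. The three sketches
# below differ in where the padding stage lives (cbr_0 folds it into the
# convolution body; cbr_1 and cbr_2 materialize a PaddedInput buffer inside
# the outer tile loops) and in the sampled unroll depth (64 vs. 512).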
def test_cpu_cbr():
@T.prim_func
def cbr_0(data: T.Buffer[(1, 224, 224, 3), "float32"], kernel: T.Buffer[(7, 7, 3, 64), "float32"], bias: T.Buffer[64, "float32"], bn_offset: T.Buffer[64, "float32"], bn_scale: T.Buffer[64, "float32"], compute: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
Conv2dOutput = T.alloc_buffer([1, 112, 112, 64], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 2, 7, 1, 1, 2, 2, 32, 7, 7, 1, 1, 1, 4, 1, 1, 1, 3, 1, 28, 2, 2):
with T.block("Conv2dOutput"):
nn = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
yy = T.axis.spatial(112, i1_0 * 56 + i1_1 * 28 + i1_2 * 28 + i1_3)
xx = T.axis.spatial(112, i2_0 * 16 + i2_1 * 8 + i2_2 * 2 + i2_3)
ff = T.axis.spatial(64, i3_0 * 64 + i3_1 * 2 + i3_2 * 2 + i3_3)
ry = T.axis.reduce(7, i4_1 + i4_0)
rx = T.axis.reduce(7, i5_0 + i5_1)
rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
T.reads(data[nn, yy * 2 + ry - 3, xx * 2 + rx - 3, rc], kernel[ry, rx, rc, ff])
T.writes(Conv2dOutput[nn, yy, xx, ff])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
Conv2dOutput[nn, yy, xx, ff] = T.float32(0)
Conv2dOutput[nn, yy, xx, ff] = Conv2dOutput[nn, yy, xx, ff] + T.if_then_else(3 <= yy * 2 + ry and yy * 2 + ry < 227 and 3 <= xx * 2 + rx and xx * 2 + rx < 227, data[nn, yy * 2 + ry - 3, xx * 2 + rx - 3, rc], T.float32(0), dtype="float32") * kernel[ry, rx, rc, ff]
for i0, i1, i2, i3 in T.grid(1, 112, 112, 64):
with T.block("compute"):
i0_4, i1_4, i2_4, i3_4 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(Conv2dOutput[i0_4, i1_4, i2_4, i3_4], bias[i3_4], bn_scale[i3_4], bn_offset[i3_4])
T.writes(compute[i0_4, i1_4, i2_4, i3_4])
compute[i0_4, i1_4, i2_4, i3_4] = T.max((Conv2dOutput[i0_4, i1_4, i2_4, i3_4] + bias[i3_4]) * bn_scale[i3_4] + bn_offset[i3_4], T.float32(0))
@T.prim_func
def cbr_1(data: T.Buffer[(1, 224, 224, 3), "float32"], kernel: T.Buffer[(7, 7, 3, 64), "float32"], bias: T.Buffer[64, "float32"], bn_offset: T.Buffer[64, "float32"], bn_scale: T.Buffer[64, "float32"], compute: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
PaddedInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
Conv2dOutput = T.alloc_buffer([1, 112, 112, 64], dtype="float32")
for i0_0, i1_0 in T.grid(1, 2):
for ax0, ax1, ax2, ax3 in T.grid(1, 117, 229, 3):
with T.block("PaddedInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(230, i1_0 * 112 + ax1)
i2 = T.axis.spatial(230, ax2)
i3 = T.axis.spatial(3, ax3)
T.reads(data[i0, i1 - 3, i2 - 3, i3])
T.writes(PaddedInput[i0, i1, i2, i3])
PaddedInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, data[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
for i2_0, i3_0, i0_1, i1_1, i2_1, i3_1 in T.grid(7, 1, 1, 2, 2, 32):
for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(7, 7, 1, 1, 1, 4, 1, 1, 1, 3, 1, 28, 2, 2):
with T.block("Conv2dOutput"):
nn = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
yy = T.axis.spatial(112, i1_0 * 56 + i1_1 * 28 + i1_2 * 28 + i1_3)
xx = T.axis.spatial(112, i2_0 * 16 + i2_1 * 8 + i2_2 * 2 + i2_3)
ff = T.axis.spatial(64, i3_0 * 64 + i3_1 * 2 + i3_2 * 2 + i3_3)
ry = T.axis.reduce(7, i4_1 + i4_0)
rx = T.axis.reduce(7, i5_0 + i5_1)
rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
T.reads(PaddedInput[nn, yy * 2 + ry, xx * 2 + rx, rc], kernel[ry, rx, rc, ff])
T.writes(Conv2dOutput[nn, yy, xx, ff])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
Conv2dOutput[nn, yy, xx, ff] = T.float32(0)
Conv2dOutput[nn, yy, xx, ff] = Conv2dOutput[nn, yy, xx, ff] + PaddedInput[nn, yy * 2 + ry, xx * 2 + rx, rc] * kernel[ry, rx, rc, ff]
for ax0, ax1, ax2, ax3 in T.grid(1, 28, 8, 2):
with T.block("compute"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(112, i1_0 * 56 + i1_1 * 28 + ax1)
i2 = T.axis.spatial(112, i2_0 * 16 + i2_1 * 8 + ax2)
i3 = T.axis.spatial(64, i3_1 * 2 + ax3)
T.reads(Conv2dOutput[i0, i1, i2, i3], bias[i3], bn_scale[i3], bn_offset[i3])
T.writes(compute[i0, i1, i2, i3])
compute[i0, i1, i2, i3] = T.max((Conv2dOutput[i0, i1, i2, i3] + bias[i3]) * bn_scale[i3] + bn_offset[i3], T.float32(0))
@T.prim_func
def cbr_2(data: T.Buffer[(1, 224, 224, 3), "float32"], kernel: T.Buffer[(7, 7, 3, 64), "float32"], bias: T.Buffer[64, "float32"], bn_offset: T.Buffer[64, "float32"], bn_scale: T.Buffer[64, "float32"], compute: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
PaddedInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
Conv2dOutput = T.alloc_buffer([1, 112, 112, 64], dtype="float32")
for i0_0, i1_0 in T.grid(1, 2):
for ax0, ax1, ax2, ax3 in T.grid(1, 117, 229, 3):
with T.block("PaddedInput"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(230, i1_0 * 112 + ax1)
i2 = T.axis.spatial(230, ax2)
i3 = T.axis.spatial(3, ax3)
T.reads(data[i0, i1 - 3, i2 - 3, i3])
T.writes(PaddedInput[i0, i1, i2, i3])
PaddedInput[i0, i1, i2, i3] = T.if_then_else(3 <= i1 and i1 < 227 and 3 <= i2 and i2 < 227, data[i0, i1 - 3, i2 - 3, i3], T.float32(0), dtype="float32")
for i2_0, i3_0 in T.grid(7, 1):
for i0_1, i1_1, i2_1, i3_1, i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 2, 2, 32, 7, 7, 1, 1, 1, 4, 1, 1, 1, 3, 1, 28, 2, 2):
with T.block("Conv2dOutput"):
nn = T.axis.spatial(1, i0_3 + i0_0 + i0_1 + i0_2)
yy = T.axis.spatial(112, i1_0 * 56 + i1_1 * 28 + i1_2 * 28 + i1_3)
xx = T.axis.spatial(112, i2_0 * 16 + i2_1 * 8 + i2_2 * 2 + i2_3)
ff = T.axis.spatial(64, i3_0 * 64 + i3_1 * 2 + i3_2 * 2 + i3_3)
ry = T.axis.reduce(7, i4_1 + i4_0)
rx = T.axis.reduce(7, i5_0 + i5_1)
rc = T.axis.reduce(3, i6_0 * 3 + i6_1)
T.reads(PaddedInput[nn, yy * 2 + ry, xx * 2 + rx, rc], kernel[ry, rx, rc, ff])
T.writes(Conv2dOutput[nn, yy, xx, ff])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
Conv2dOutput[nn, yy, xx, ff] = T.float32(0)
Conv2dOutput[nn, yy, xx, ff] = Conv2dOutput[nn, yy, xx, ff] + PaddedInput[nn, yy * 2 + ry, xx * 2 + rx, rc] * kernel[ry, rx, rc, ff]
for ax0, ax1, ax2, ax3 in T.grid(1, 56, 16, 64):
with T.block("compute"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(112, i1_0 * 56 + ax1)
i2 = T.axis.spatial(112, i2_0 * 16 + ax2)
i3 = T.axis.spatial(64, ax3)
T.reads(Conv2dOutput[i0, i1, i2, i3], bias[i3], bn_scale[i3], bn_offset[i3])
T.writes(compute[i0, i1, i2, i3])
compute[i0, i1, i2, i3] = T.max((Conv2dOutput[i0, i1, i2, i3] + bias[i3]) * bn_scale[i3] + bn_offset[i3], T.float32(0))
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 2, 1, 28]),
("SamplePerfectTile", [7, 2, 4, 2]),
("SamplePerfectTile", [1, 32, 1, 2]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 2),
("SampleComputeLocation", -2),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 2, 1, 28]),
("SamplePerfectTile", [7, 2, 4, 2]),
("SamplePerfectTile", [1, 32, 1, 2]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 3),
("SampleComputeLocation", 1),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [2, 2, 1, 28]),
("SamplePerfectTile", [7, 2, 4, 2]),
("SamplePerfectTile", [1, 32, 1, 2]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [7, 1]),
("SamplePerfectTile", [1, 3]),
("SampleCategorical", 2),
("SampleComputeLocation", 1),
]
mod = create_te_workload("CBR", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[cbr_0, cbr_1, cbr_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
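# TBG = transposed batch GEMM on attention-shaped tensors (batch=1, heads=12,
# seq=128, dim=64). The sketches differ in where the query_T / value_T
# transposes are computed: tbg_0 keeps both inside the outer tile loops,
# tbg_1 hoists query_T to the root, and tbg_2 drops the query transpose and
# the write cache, reading query and writing C directly.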
def test_cpu_tbg():
@T.prim_func
def tbg_0(query: T.Buffer[(1, 128, 12, 64), "float32"], value: T.Buffer[(1, 128, 12, 64), "float32"], C: T.Buffer[(1, 12, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
query_T = T.alloc_buffer([1, 12, 128, 64], dtype="float32")
value_T = T.alloc_buffer([1, 12, 64, 128], dtype="float32")
C_global = T.alloc_buffer([1, 12, 128, 128], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1 in T.grid(1, 1, 1, 2, 1, 6, 2):
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 64):
with T.block("value_T"):
b = T.axis.spatial(1, ax0)
h = T.axis.spatial(12, i1_1 * 2 + ax1)
d = T.axis.spatial(64, ax2)
l = T.axis.spatial(128, i3_0 * 64 + ax3)
T.reads(value[b, l, h, d])
T.writes(value_T[b, h, d, l])
value_T[b, h, d, l] = value[b, l, h, d]
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 64):
with T.block("query_T"):
b = T.axis.spatial(1, ax0)
h = T.axis.spatial(12, i1_1 * 2 + ax1)
l = T.axis.spatial(128, i2_1 * 64 + ax2)
d = T.axis.spatial(64, ax3)
T.reads(query[b, l, h, d])
T.writes(query_T[b, h, l, d])
query_T[b, h, l, d] = query[b, l, h, d]
for i3_1 in T.serial(8):
for i4_0, i0_2, i1_2, i2_2, i3_2, i4_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 2, 2, 4, 64, 1, 1, 32, 2):
with T.block("C"):
b = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
h = T.axis.spatial(12, i1_0 * 12 + i1_1 * 2 + i1_2 + i1_3)
i = T.axis.spatial(128, i2_0 * 128 + i2_1 * 64 + i2_2 * 32 + i2_3)
j = T.axis.spatial(128, i3_0 * 64 + i3_1 * 8 + i3_2 * 2 + i3_3)
k = T.axis.reduce(64, i4_0 * 64 + i4_1)
T.reads(query_T[b, h, i, k], value_T[b, h, k, j])
T.writes(C_global[b, h, i, j])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
C_global[b, h, i, j] = T.float32(0)
C_global[b, h, i, j] = C_global[b, h, i, j] + query_T[b, h, i, k] * value_T[b, h, k, j]
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 8):
with T.block("C_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(12, i1_1 * 2 + ax1)
v2 = T.axis.spatial(128, i2_1 * 64 + ax2)
v3 = T.axis.spatial(128, i3_0 * 64 + i3_1 * 8 + ax3)
T.reads(C_global[v0, v1, v2, v3])
T.writes(C[v0, v1, v2, v3])
C[v0, v1, v2, v3] = C_global[v0, v1, v2, v3]
@T.prim_func
def tbg_1(query: T.Buffer[(1, 128, 12, 64), "float32"], value: T.Buffer[(1, 128, 12, 64), "float32"], C: T.Buffer[(1, 12, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
query_T = T.alloc_buffer([1, 12, 128, 64], dtype="float32")
value_T = T.alloc_buffer([1, 12, 64, 128], dtype="float32")
C_global = T.alloc_buffer([1, 12, 128, 128], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 12, 128, 64):
with T.block("query_T"):
b, h, l, d = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(query[b, l, h, d])
T.writes(query_T[b, h, l, d])
query_T[b, h, l, d] = query[b, l, h, d]
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 1, 1, 2):
for i0_1, i1_1, i2_1, i3_1, i4_0, i0_2, i1_2, i2_2, i3_2, i4_1 in T.grid(1, 6, 2, 8, 1, 1, 2, 2, 4, 64):
for ax0, ax1, ax2, ax3 in T.grid(1, 1, 1, 2):
with T.block("value_T"):
b = T.axis.spatial(1, ax0)
h = T.axis.spatial(12, i1_1 * 2 + i1_2 + ax1)
d = T.axis.spatial(64, i4_1 + ax2)
l = T.axis.spatial(128, i3_0 * 64 + i3_1 * 8 + i3_2 * 2 + ax3)
T.reads(value[b, l, h, d])
T.writes(value_T[b, h, d, l])
value_T[b, h, d, l] = value[b, l, h, d]
for i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 32, 2):
with T.block("C"):
b = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
h = T.axis.spatial(12, i1_0 * 12 + i1_1 * 2 + i1_2 + i1_3)
i = T.axis.spatial(128, i2_0 * 128 + i2_1 * 64 + i2_2 * 32 + i2_3)
j = T.axis.spatial(128, i3_0 * 64 + i3_1 * 8 + i3_2 * 2 + i3_3)
k = T.axis.reduce(64, i4_0 * 64 + i4_1)
T.reads(query_T[b, h, i, k], value_T[b, h, k, j])
T.writes(C_global[b, h, i, j])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
C_global[b, h, i, j] = T.float32(0)
C_global[b, h, i, j] = C_global[b, h, i, j] + query_T[b, h, i, k] * value_T[b, h, k, j]
for ax0, ax1, ax2, ax3 in T.grid(1, 12, 128, 64):
with T.block("C_global"):
v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
v3 = T.axis.spatial(128, i3_0 * 64 + ax3)
T.reads(C_global[v0, v1, v2, v3])
T.writes(C[v0, v1, v2, v3])
C[v0, v1, v2, v3] = C_global[v0, v1, v2, v3]
@T.prim_func
def tbg_2(query: T.Buffer[(1, 128, 12, 64), "float32"], value: T.Buffer[(1, 128, 12, 64), "float32"], C: T.Buffer[(1, 12, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
value_T = T.alloc_buffer([1, 12, 64, 128], dtype="float32")
for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1 in T.grid(1, 1, 1, 2, 1, 6, 2, 8):
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 8):
with T.block("value_T"):
b = T.axis.spatial(1, ax0)
h = T.axis.spatial(12, i1_1 * 2 + ax1)
d = T.axis.spatial(64, ax2)
l = T.axis.spatial(128, i3_0 * 64 + i3_1 * 8 + ax3)
T.reads(value[b, l, h, d])
T.writes(value_T[b, h, d, l])
value_T[b, h, d, l] = value[b, l, h, d]
for i4_0, i0_2, i1_2, i2_2, i3_2, i4_1, i0_3, i1_3, i2_3, i3_3 in T.grid(1, 1, 2, 2, 4, 64, 1, 1, 32, 2):
with T.block("C"):
b = T.axis.spatial(1, i0_1 + i0_2 + i0_3 + i0_0)
h = T.axis.spatial(12, i1_0 * 12 + i1_1 * 2 + i1_2 + i1_3)
i = T.axis.spatial(128, i2_0 * 128 + i2_1 * 64 + i2_2 * 32 + i2_3)
j = T.axis.spatial(128, i3_0 * 64 + i3_1 * 8 + i3_2 * 2 + i3_3)
k = T.axis.reduce(64, i4_0 * 64 + i4_1)
T.reads(query[b, i, h, k], value_T[b, h, k, j])
T.writes(C[b, h, i, j])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
with T.init():
C[b, h, i, j] = T.float32(0)
C[b, h, i, j] = C[b, h, i, j] + query[b, i, h, k] * value_T[b, h, k, j]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 6, 2, 1]),
("SamplePerfectTile", [1, 2, 2, 32]),
("SamplePerfectTile", [2, 8, 4, 2]),
("SamplePerfectTile", [1, 64]),
("SampleCategorical", 2),
("SampleComputeLocation", 6),
("SampleComputeLocation", 6),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 6, 2, 1]),
("SamplePerfectTile", [1, 2, 2, 32]),
("SamplePerfectTile", [2, 8, 4, 2]),
("SamplePerfectTile", [1, 64]),
("SampleCategorical", 2),
("SampleComputeLocation", 13),
("SampleComputeLocation", -1),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 6, 2, 1]),
("SamplePerfectTile", [1, 2, 2, 32]),
("SamplePerfectTile", [2, 8, 4, 2]),
("SamplePerfectTile", [1, 64]),
("SampleCategorical", 3),
("SampleComputeLocation", 7),
("SampleComputeLocation", -2),
]
mod = create_te_workload("TBG", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[tbg_0, tbg_1, tbg_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
if __name__ == "__main__":
test_cpu_c1d()
test_cpu_c2d()
test_cpu_c3d()
test_cpu_cap()
test_cpu_dep()
test_cpu_dil()
test_cpu_gmm()
test_cpu_grp()
test_cpu_t2d()
test_cpu_nrm()
test_cpu_sfm()
test_cpu_cbr()
test_cpu_tbg()
"""Tests for MetaSchedule search space on CPU"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target
def _target():
return Target("aws/cpu/c5.9xlarge")
def _design_space(mod):
return generate_design_space(
kind="llvm",
mod=mod,
target=_target(),
types=ms.ScheduleRule,
)
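# generate_design_space runs the default "llvm" ScheduleRules on the workload
# and returns one schedule per sketch; check_sketches (used below) matches
# each sketch against the expected TVMScript module and the recorded
# sampling decisions.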
def test_cpu_nhwc():
@T.prim_func
def cpu_nhwc_0(X: T.Buffer[(1, 14, 14, 128), "float32"], W: T.Buffer[(6, 6, 128, 128), "float32"], conv2d_winograd: T.Buffer[(1, 12, 12, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
data_pad = T.alloc_buffer([1, 16, 16, 128], dtype="float32")
input_tile = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
data_pack = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
bgemm = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
inverse = T.alloc_buffer([4, 4, 9, 128], dtype="float32")
bgemm_global = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
for i2_0 in T.serial(9):
for ax0, ax1, ax2, ax3 in T.grid(1, 6, 6, 128):
with T.block("data_pad"):
i0 = T.axis.spatial(1, ax0)
i1 = T.axis.spatial(16, i2_0 // 3 * 4 + ax1)
i2 = T.axis.spatial(16, i2_0 % 3 * 4 + ax2)
i3 = T.axis.spatial(128, ax3)
T.reads(X[i0, i1, i2, i3])
T.writes(data_pad[i0, i1, i2, i3])
T.block_attr({"schedule_rule":"None"})
data_pad[i0, i1, i2, i3] = T.if_then_else(0 <= i1 and i1 < 14 and 0 <= i2 and i2 < 14, X[i0, i1, i2, i3], T.float32(0), dtype="float32")
for i3_0 in T.serial(2):
for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 64):
with T.block("input_tile"):
eps, nu = T.axis.remap("SS", [ax0, ax1])
p = T.axis.spatial(9, i2_0 + ax2)
ci = T.axis.spatial(128, i3_0 * 64 + ax3)
T.reads(data_pad[p // 9, p % 9 // 3 * 4 + eps, p % 3 * 4 + nu, ci])
T.writes(input_tile[eps, nu, p, ci])
T.block_attr({"schedule_rule":"None"})
input_tile[eps, nu, p, ci] = data_pad[p // 9, p % 9 // 3 * 4 + eps, p % 3 * 4 + nu, ci]
for i2_1, i3_1 in T.grid(1, 64):
for i0 in T.unroll(6):
for i1 in T.unroll(6):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("data_pack"):
eps, nu = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(9, i2_0 + i2_1)
ci = T.axis.spatial(128, i3_0 * 64 + i3_1)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(input_tile[r_a, r_b, p, ci])
T.writes(data_pack[eps, nu, p, ci])
T.block_attr({"schedule_rule":"conv2d_nhwc_winograd_data_pack"})
with T.init():
data_pack[eps, nu, p, ci] = T.float32(0)
data_pack[eps, nu, p, ci] = data_pack[eps, nu, p, ci] + input_tile[r_a, r_b, p, ci] * T.Select(r_a % 6 == 5 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 5 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 0, T.float32(0), T.Select(r_a % 6 == 4 and eps % 6 == 5, T.float32(1.5), T.Select(r_a % 6 == 4 and eps % 6 == 4, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 3, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 2, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 0, T.float32(1), T.Select(r_a % 6 == 3 and eps % 6 == 5, T.float32(-2), T.Select(r_a % 6 == 3 and eps % 6 == 4, T.float32(-0.5), T.Select(r_a % 6 == 3 and eps % 6 == 3, T.float32(2), T.Select(r_a % 6 == 3 and eps % 6 == 2, T.float32(2.5), T.Select(r_a % 6 == 3 and eps % 6 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and eps % 6 == 0, T.float32(1.5), T.Select(r_a % 6 == 2 and eps % 6 == 5, T.float32(-1.5), T.Select(r_a % 6 == 2 and eps % 6 == 4, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 3, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 2, T.float32(0.5), T.Select(r_a % 6 == 2 and eps % 6 == 1, T.float32(-2.5), T.Select(r_a % 6 == 2 and eps % 6 == 0, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 4, T.float32(0.5), T.Select(r_a % 6 == 1 and eps % 6 == 3, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 2, T.float32(-1), T.Select(r_a % 6 == 1 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 0, T.float32(-1.5), T.Select(r_a % 6 == 0 and eps % 6 == 5, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 5 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 0, T.float32(0), T.Select(r_b % 6 == 4 and nu % 6 == 5, T.float32(1.5), T.Select(r_b % 6 == 4 and nu % 6 == 4, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 3, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 2, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 0, T.float32(1), T.Select(r_b % 6 == 3 and nu % 6 == 5, T.float32(-2), T.Select(r_b % 6 == 3 and nu % 6 == 4, T.float32(-0.5), T.Select(r_b % 6 == 3 and nu % 6 == 3, T.float32(2), T.Select(r_b % 6 == 3 and nu % 6 == 2, T.float32(2.5), T.Select(r_b % 6 == 3 and nu % 6 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and nu % 6 == 0, T.float32(1.5), T.Select(r_b % 6 == 2 and nu % 6 == 5, T.float32(-1.5), T.Select(r_b % 6 == 2 and nu % 6 == 4, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 3, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 2, T.float32(0.5), T.Select(r_b % 6 == 2 and nu % 6 == 1, T.float32(-2.5), T.Select(r_b % 6 == 2 and nu % 6 == 0, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 4, T.float32(0.5), T.Select(r_b % 6 == 1 and nu % 6 == 3, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 2, T.float32(-1), T.Select(r_b % 6 == 1 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 0, T.float32(-1.5), T.Select(r_b % 6 == 0 and nu % 6 == 5, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1 in T.grid(3, 2, 3, 1, 1, 1, 1, 1):
for i4_0, i0_2, i1_2, i2_2, i3_2, i4_1, i0_3, i1_3, i2_3, i3_3 in T.grid(32, 1, 1, 1, 2, 4, 2, 3, 3, 64):
with T.block("bgemm"):
eps = T.axis.spatial(6, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
nu = T.axis.spatial(6, i1_0 * 3 + i1_1 * 3 + i1_2 * 3 + i1_3) |
p = T.axis.spatial(9, i2_0 * 3 + i2_1 * 3 + i2_2 * 3 + i2_3)
co = T.axis.spatial(128, i3_0 * 128 + i3_1 * 128 + i3_2 * 64 + i3_3)
ci = T.axis.reduce(128, i4_0 * 4 + i4_1)
T.reads(data_pack[eps, nu, p, ci], W[eps, nu, co, ci])
T.writes(bgemm_global[eps, nu, p, co])
T.block_attr({"meta_schedule.tiling_structure":"SSRSRS", "meta_schedule.write_cache_level":[2]})
with T.init():
bgemm_global[eps, nu, p, co] = T.float32(0)
bgemm_global[eps, nu, p, co] = bgemm_global[eps, nu, p, co] + data_pack[eps, nu, p, ci] * W[eps, nu, co, ci]
for ax0, ax1, ax2, ax3 in T.grid(2, 3, 3, 128):
with T.block("bgemm_global"):
v0 = T.axis.spatial(6, i0_0 * 2 + ax0)
v1 = T.axis.spatial(6, i1_0 * 3 + ax1)
v2 = T.axis.spatial(9, i2_0 * 3 + ax2)
v3 = T.axis.spatial(128, ax3)
T.reads(bgemm_global[v0, v1, v2, v3])
T.writes(bgemm[v0, v1, v2, v3])
bgemm[v0, v1, v2, v3] = bgemm_global[v0, v1, v2, v3]
for i2_0, i3_0, i2_1, i3_1 in T.grid(3, 8, 3, 16):
for i0 in T.unroll(4):
for i1 in T.unroll(4):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("inverse"):
vh, vw = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(9, i2_0 * 3 + i2_1)
co = T.axis.spatial(128, i3_0 * 16 + i3_1)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(bgemm[r_a, r_b, p, co])
T.writes(inverse[vh, vw, p, co])
T.block_attr({"schedule_rule":"conv2d_nhwc_winograd_inverse"})
with T.init():
inverse[vh, vw, p, co] = T.float32(0)
inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co] * T.Select(r_a % 6 == 5 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 5 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 0, T.float32(0), T.Select(r_a % 6 == 4 and vh % 4 == 3, T.float32(-8), T.Select(r_a % 6 == 4 and vh % 4 == 2, T.float32(4), T.Select(r_a % 6 == 4 and vh % 4 == 1, T.float32(-2), T.Select(r_a % 6 == 4 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 3 and vh % 4 == 3, T.float32(0.125), T.Select(r_a % 6 == 3 and vh % 4 == 2, T.float32(0.25), T.Select(r_a % 6 == 3 and vh % 4 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 1, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 3, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 1, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 0 and vh % 4 == 3, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 5 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 0, T.float32(0), T.Select(r_b % 6 == 4 and vw % 4 == 3, T.float32(-8), T.Select(r_b % 6 == 4 and vw % 4 == 2, T.float32(4), T.Select(r_b % 6 == 4 and vw % 4 == 1, T.float32(-2), T.Select(r_b % 6 == 4 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 3 and vw % 4 == 3, T.float32(0.125), T.Select(r_b % 6 == 3 and vw % 4 == 2, T.float32(0.25), T.Select(r_b % 6 == 3 and vw % 4 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 1, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 3, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 1, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 0 and vw % 4 == 3, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
for i0, i1, i2, i3 in T.grid(1, 12, 12, 128):
with T.block("conv2d_winograd"):
n, h, w, co = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(inverse[h % 4, w % 4, n * 9 + h // 4 * 3 + w // 4, co])
T.writes(conv2d_winograd[n, h, w, co])
conv2d_winograd[n, h, w, co] = inverse[h % 4, w % 4, n * 9 + h // 4 * 3 + w // 4, co]
decision_0 = [
("SamplePerfectTile", [3, 3]),
("SamplePerfectTile", [8, 16]),
("SamplePerfectTile", [9, 1]),
("SamplePerfectTile", [2, 64]),
("SampleComputeLocation", 1),
("SampleComputeLocation", 0),
("SamplePerfectTile", [3, 1, 1, 2]),
("SamplePerfectTile", [2, 1, 1, 3]),
("SamplePerfectTile", [3, 1, 1, 3]),
("SamplePerfectTile", [1, 1, 2, 64]),
("SamplePerfectTile", [32, 4]),
("SampleCategorical", 2),
]
with _target():
mod = create_te_workload("C2D_WIN_NHWC", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[cpu_nhwc_0],
expected_decisions=[decision_0],
)
if __name__ == "__main__":
test_cpu_nhwc()
"""Tests for MetaSchedule search space on CUDA"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target
def _target():
return Target("nvidia/geforce-rtx-3070")
def _design_space(mod):
return generate_design_space(
kind="cuda",
mod=mod,
target=_target(),
types=ms.ScheduleRule,
)
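# Unlike the CPU sketches above, the CUDA rules bind the fused spatial loops
# to blockIdx.x / vthread.x / threadIdx.x and stage the operands through
# "shared"-scope buffers with cooperative fetching, accumulating into a
# "local"-scope cache.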
def test_cuda_c1d():
@T.prim_func
def c1d_0(inputs: T.Buffer[(1, 256, 64), "float32"], weight: T.Buffer[(3, 64, 128), "float32"], conv1d_nlc: T.Buffer[(1, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
conv1d_nlc_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 258, 64], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([3, 64, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_fused in T.thread_binding(4, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_fused in T.thread_binding(16, thread="vthread.x"):
for i0_2_i1_2_i2_2_fused in T.thread_binding(4, thread="threadIdx.x"):
for i3_0, i4_0 in T.grid(1, 16):
for ax0_ax1_ax2_fused in T.serial(260):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(258, i0_0_i1_0_i2_0_fused * 64 + ax0_ax1_ax2_fused // 4)
v2 = T.axis.spatial(64, i4_0 * 4 + ax0_ax1_ax2_fused % 4)
T.reads(inputs[v0, v1 - 1, v2])
T.writes(PadInput_shared[v0, v1, v2])
T.block_attr({"meta_schedule.cooperative_fetch":4})
PadInput_shared[v0, v1, v2] = T.if_then_else(1 <= v1 and v1 < 257, inputs[v0, v1 - 1, v2], T.float32(0), dtype="float32")
for ax0_ax1_ax2_fused in T.serial(1536):
with T.block("weight_shared"):
v0 = T.axis.spatial(3, ax0_ax1_ax2_fused // 512)
v1 = T.axis.spatial(64, i4_0 * 4 + ax0_ax1_ax2_fused % 512 // 128)
v2 = T.axis.spatial(128, ax0_ax1_ax2_fused % 128)
T.reads(weight[v0, v1, v2])
T.writes(weight_shared[v0, v1, v2])
T.block_attr({"meta_schedule.cooperative_fetch":3})
weight_shared[v0, v1, v2] = weight[v0, v1, v2]
for i3_1, i4_1, i0_3, i1_3, i2_3, i3_2, i4_2, i0_4, i1_4, i2_4 in T.grid(1, 2, 1, 1, 2, 3, 2, 1, 4, 8):
with T.block("conv1d_nlc"):
n = T.axis.spatial(1, i0_4 + i0_3)
l = T.axis.spatial(128, i0_0_i1_0_i2_0_fused * 32 + i0_1_i1_1_i2_1_fused // 2 * 4 + i1_3 * 4 + i1_4)
co = T.axis.spatial(128, i0_1_i1_1_i2_1_fused % 2 * 64 + i0_2_i1_2_i2_2_fused * 16 + i2_3 * 8 + i2_4)
rl = T.axis.reduce(3, i3_0 * 3 + i3_1 * 3 + i3_2)
rc = T.axis.reduce(64, i4_0 * 4 + i4_1 * 2 + i4_2)
T.reads(PadInput_shared[n, l * 2 + rl, co // 128 * 64 + rc], weight_shared[rl, rc, co])
T.writes(conv1d_nlc_local[n, l, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv1d_nlc_local[n, l, co] = T.float32(0)
conv1d_nlc_local[n, l, co] = conv1d_nlc_local[n, l, co] + PadInput_shared[n, l * 2 + rl, co // 128 * 64 + rc] * weight_shared[rl, rc, co]
for ax0, ax1, ax2 in T.grid(1, 4, 16):
with T.block("conv1d_nlc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused * 32 + i0_1_i1_1_i2_1_fused // 2 * 4 + ax1)
v2 = T.axis.spatial(128, i0_1_i1_1_i2_1_fused % 2 * 64 + i0_2_i1_2_i2_2_fused * 16 + ax2)
T.reads(conv1d_nlc_local[v0, v1, v2])
T.writes(conv1d_nlc[v0, v1, v2])
conv1d_nlc[v0, v1, v2] = conv1d_nlc_local[v0, v1, v2]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [4, 8, 1, 1, 4]),
("SamplePerfectTile", [1, 2, 4, 2, 8]),
("SamplePerfectTile", [1, 1, 3]),
("SamplePerfectTile", [16, 2, 2]),
("SampleCategorical", 3),
("SampleCategorical", 2),
("SampleCategorical", 1),
]
mod = create_te_workload("C1D", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[c1d_0],
expected_decisions=[decision_0],
)
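# C2D yields a single sketch with the same SSSRRSRS structure as C1D:
# PadInput and weight are cooperatively fetched into shared memory and the
# conv2d output is accumulated in conv2d_nhwc_local before write-back.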
def test_cuda_c2d():
@T.prim_func
def c2d_0(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
conv2d_nhwc_local = T.alloc_buffer([1, 112, 112, 64], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 230, 230, 3], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([7, 7, 3, 64], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(56, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(14, thread="threadIdx.x"):
for i4_0, i5_0, i6_0 in T.grid(1, 1, 1):
for ax0_ax1_ax2_ax3_fused in T.serial(80379):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(230, ax0_ax1_ax2_ax3_fused // 351)
v2 = T.axis.spatial(230, i0_0_i1_0_i2_0_i3_0_fused // 8 * 112 + ax0_ax1_ax2_ax3_fused % 351 // 3)
v3 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 3)
T.reads(inputs[v0, v1 - 3, v2 - 3, v3])
T.writes(PadInput_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":2})
PadInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, inputs[v0, v1 - 3, v2 - 3, v3], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_fused in T.serial(1176):
with T.block("weight_shared"):
v0 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused // 168)
v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused % 168 // 24)
v2 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 24 // 8)
v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 8 * 8 + ax0_ax1_ax2_ax3_fused % 8)
T.reads(weight[v0, v1, v2, v3])
T.writes(weight_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":4})
weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 7, 1, 1, 8, 4, 1, 7, 1, 3, 1, 1, 1, 2):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, i0_3 + i0_4)
h = T.axis.spatial(112, i1_4 + i0_2_i1_2_i2_2_i3_2_fused * 8 + i1_3)
w = T.axis.spatial(112, i0_0_i1_0_i2_0_i3_0_fused // 8 * 56 + i0_1_i1_1_i2_1_i3_1_fused // 4 * 4 + i2_3 + i2_4)
co = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 8 * 8 + i0_1_i1_1_i2_1_i3_1_fused % 4 * 2 + i3_3 * 2 + i3_4)
rh = T.axis.reduce(7, i4_0 * 7 + i4_1 * 7 + i4_2)
rw = T.axis.reduce(7, i5_2 + i5_0 * 7 + i5_1)
rc = T.axis.reduce(3, i6_0 * 3 + i6_1 * 3 + i6_2)
T.reads(PadInput_shared[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc], weight_shared[rh, rw, rc, co])
T.writes(conv2d_nhwc_local[n, h, w, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv2d_nhwc_local[n, h, w, co] = T.float32(0)
conv2d_nhwc_local[n, h, w, co] = conv2d_nhwc_local[n, h, w, co] + PadInput_shared[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight_shared[rh, rw, rc, co]
for ax0, ax1, ax2, ax3 in T.grid(1, 8, 4, 2):
with T.block("conv2d_nhwc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(112, i0_2_i1_2_i2_2_i3_2_fused * 8 + ax1)
v2 = T.axis.spatial(112, i0_0_i1_0_i2_0_i3_0_fused // 8 * 56 + i0_1_i1_1_i2_1_i3_1_fused // 4 * 4 + ax2)
v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 8 * 8 + i0_1_i1_1_i2_1_i3_1_fused % 4 * 2 + ax3)
T.reads(conv2d_nhwc_local[v0, v1, v2, v3])
T.writes(conv2d_nhwc[v0, v1, v2, v3])
conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 14, 8, 1]),
("SamplePerfectTile", [2, 14, 1, 4, 1]),
("SamplePerfectTile", [8, 4, 1, 1, 2]),
("SamplePerfectTile", [1, 1, 7]),
("SamplePerfectTile", [1, 7, 1]),
("SamplePerfectTile", [1, 1, 3]),
("SampleCategorical", 1),
("SampleCategorical", 3),
("SampleCategorical", 1),
]
mod = create_te_workload("C2D", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[c2d_0],
expected_decisions=[decision_0],
)
def test_cuda_c3d():
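# C3D: NDHWC conv3d (7x7x7 kernel, stride 2). Same CUDA sketch shape as C2D,
# with one extra spatial axis folded into each fused block/vthread/thread loop
# nest and into the shared-memory cooperative fetches.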
@T.prim_func
def c3d_0(inputs: T.Buffer[(1, 16, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 7, 3, 64), "float32"], conv3d_ndhwc: T.Buffer[(1, 8, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
conv3d_ndhwc_local = T.alloc_buffer([1, 8, 112, 112, 64], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 22, 230, 230, 3], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([7, 7, 7, 3, 64], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_i4_0_fused in T.thread_binding(2, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_i4_1_fused in T.thread_binding(8, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_i4_2_fused in T.thread_binding(392, thread="threadIdx.x"):
for i5_0, i6_0, i7_0, i8_0 in T.grid(1, 1, 1, 1):
for ax0_ax1_ax2_ax3_ax4_fused in T.serial(1687959):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0) |
v1 = T.axis.spatial(22, ax0_ax1_ax2_ax3_ax4_fused // 80379)
v2 = T.axis.spatial(230, ax0_ax1_ax2_ax3_ax4_fused % 80379 // 351)
v3 = T.axis.spatial(230, i0_0_i1_0_i2_0_i3_0_i4_0_fused * 112 + ax0_ax1_ax2_ax3_ax4_fused % 351 // 3)
v4 = T.axis.spatial(3, ax0_ax1_ax2_ax3_ax4_fused % 3)
T.reads(inputs[v0, v1 - 3, v2 - 3, v3 - 3, v4])
T.writes(PadInput_shared[v0, v1, v2, v3, v4])
T.block_attr({"meta_schedule.cooperative_fetch":4})
PadInput_shared[v0, v1, v2, v3, v4] = T.if_then_else(3 <= v1 and v1 < 19 and 3 <= v2 and v2 < 227 and 3 <= v3 and v3 < 227, inputs[v0, v1 - 3, v2 - 3, v3 - 3, v4], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_ax4_fused in T.serial(65856):
with T.block("weight_shared"):
v0 = T.axis.spatial(7, ax0_ax1_ax2_ax3_ax4_fused // 9408)
v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_ax4_fused % 9408 // 1344)
v2 = T.axis.spatial(7, ax0_ax1_ax2_ax3_ax4_fused % 1344 // 192)
v3 = T.axis.spatial(3, ax0_ax1_ax2_ax3_ax4_fused % 192 // 64)
v4 = T.axis.spatial(64, ax0_ax1_ax2_ax3_ax4_fused % 64)
T.reads(weight[v0, v1, v2, v3, v4])
T.writes(weight_shared[v0, v1, v2, v3, v4])
T.block_attr({"meta_schedule.cooperative_fetch":3})
weight_shared[v0, v1, v2, v3, v4] = weight[v0, v1, v2, v3, v4]
for i5_1, i6_1, i7_1, i8_1, i0_3, i1_3, i2_3, i3_3, i4_3, i5_2, i6_2, i7_2, i8_2, i0_4, i1_4, i2_4, i3_4, i4_4 in T.grid(7, 7, 1, 3, 1, 2, 2, 1, 32, 1, 1, 7, 1, 1, 1, 2, 4, 1):
with T.block("conv3d_ndhwc"):
n = T.axis.spatial(1, i0_4 + i0_3)
d = T.axis.spatial(8, i1_4 + i0_2_i1_2_i2_2_i3_2_i4_2_fused // 98 * 2 + i1_3)
h = T.axis.spatial(112, i0_1_i1_1_i2_1_i3_1_i4_1_fused // 2 * 28 + i0_2_i1_2_i2_2_i3_2_i4_2_fused % 98 // 14 * 4 + i2_3 * 2 + i2_4)
w = T.axis.spatial(112, i0_0_i1_0_i2_0_i3_0_i4_0_fused * 56 + i0_1_i1_1_i2_1_i3_1_i4_1_fused % 2 * 28 + i0_2_i1_2_i2_2_i3_2_i4_2_fused % 14 // 2 * 4 + i3_4)
co = T.axis.spatial(64, i0_2_i1_2_i2_2_i3_2_i4_2_fused % 2 * 32 + i4_3 + i4_4)
rd = T.axis.reduce(7, i5_2 + i5_0 * 7 + i5_1)
rh = T.axis.reduce(7, i6_0 * 7 + i6_1 + i6_2)
rw = T.axis.reduce(7, i7_0 * 7 + i7_1 * 7 + i7_2)
rc = T.axis.reduce(3, i8_0 * 3 + i8_1 + i8_2)
T.reads(PadInput_shared[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc], weight_shared[rd, rh, rw, rc, co])
T.writes(conv3d_ndhwc_local[n, d, h, w, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv3d_ndhwc_local[n, d, h, w, co] = T.float32(0)
conv3d_ndhwc_local[n, d, h, w, co] = conv3d_ndhwc_local[n, d, h, w, co] + PadInput_shared[n, d * 2 + rd, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight_shared[rd, rh, rw, rc, co]
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 2, 4, 4, 32):
with T.block("conv3d_ndhwc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(8, i0_2_i1_2_i2_2_i3_2_i4_2_fused // 98 * 2 + ax1)
v2 = T.axis.spatial(112, i0_1_i1_1_i2_1_i3_1_i4_1_fused // 2 * 28 + i0_2_i1_2_i2_2_i3_2_i4_2_fused % 98 // 14 * 4 + ax2)
v3 = T.axis.spatial(112, i0_0_i1_0_i2_0_i3_0_i4_0_fused * 56 + i0_1_i1_1_i2_1_i3_1_i4_1_fused % 2 * 28 + i0_2_i1_2_i2_2_i3_2_i4_2_fused % 14 // 2 * 4 + ax3)
v4 = T.axis.spatial(64, i0_2_i1_2_i2_2_i3_2_i4_2_fused % 2 * 32 + ax4)
T.reads(conv3d_ndhwc_local[v0, v1, v2, v3, v4])
T.writes(conv3d_ndhwc[v0, v1, v2, v3, v4])
conv3d_ndhwc[v0, v1, v2, v3, v4] = conv3d_ndhwc_local[v0, v1, v2, v3, v4]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 4, 2, 1]),
("SamplePerfectTile", [1, 4, 7, 2, 2]),
("SamplePerfectTile", [2, 2, 7, 1, 4]),
("SamplePerfectTile", [1, 1, 2, 32, 1]),
("SamplePerfectTile", [1, 7, 1]),
("SamplePerfectTile", [1, 7, 1]),
("SamplePerfectTile", [1, 1, 7]),
("SamplePerfectTile", [1, 3, 1]),
("SampleCategorical", 3),
("SampleCategorical", 2),
("SampleCategorical", 1),
]
mod = create_te_workload("C3D", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[c3d_0],
expected_decisions=[decision_0],
)
def test_cuda_cap():
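# CAP: capsule conv2d in NHWIJC layout. The two capsule axes (cap_i, cap_j) are
# tiled alongside h/w/co, so six spatial axes are fused onto the GPU hierarchy;
# the reduction covers rh, rw, cap_k, and rc.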
@T.prim_func
def cap_0(inputs: T.Buffer[(1, 16, 16, 4, 4, 32), "float32"], weight: T.Buffer[(3, 3, 4, 4, 32, 32), "float32"], conv2d_capsule_nhwijc: T.Buffer[(1, 8, 8, 4, 4, 32), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":64})
conv2d_capsule_nhwijc_local = T.alloc_buffer([1, 8, 8, 4, 4, 32], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 18, 18, 4, 4, 32], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([3, 3, 4, 4, 32, 32], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused in T.thread_binding(256, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_i4_1_i5_1_fused in T.thread_binding(1, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_i4_2_i5_2_fused in T.thread_binding(4, thread="threadIdx.x"):
for i6_0, i7_0, i8_0, i9_0 in T.grid(3, 3, 2, 8):
for ax0_ax1_ax2_ax3_ax4_ax5_fused in T.serial(48):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(18, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused // 64 * 4 + i6_0 + ax0_ax1_ax2_ax3_ax4_ax5_fused // 16)
v2 = T.axis.spatial(18, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 64 // 8 * 2 + i7_0)
v3 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 8 // 4 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 16 // 8)
v4 = T.axis.spatial(4, i8_0 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 8 // 4)
v5 = T.axis.spatial(32, i9_0 * 4 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 4)
T.reads(inputs[v0, v1 - 1, v2 - 1, v3, v4, v5])
T.writes(PadInput_shared[v0, v1, v2, v3, v4, v5])
T.block_attr({"meta_schedule.cooperative_fetch":2})
PadInput_shared[v0, v1, v2, v3, v4, v5] = T.if_then_else(1 <= v1 and v1 < 17 and 1 <= v2 and v2 < 17, inputs[v0, v1 - 1, v2 - 1, v3, v4, v5], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_ax4_ax5_fused in T.serial(256):
with T.block("weight_shared"):
v0, v1 = T.axis.remap("SS", [i6_0, i7_0])
v2 = T.axis.spatial(4, i8_0 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_fused // 128)
v3 = T.axis.spatial(4, ax0_ax1_ax2_ax3_ax4_ax5_fused % 128 // 32)
v4 = T.axis.spatial(32, i9_0 * 4 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 32 // 8)
v5 = T.axis.spatial(32, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 4 * 8 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 8)
T.reads(weight[v0, v1, v2, v3, v4, v5])
T.writes(weight_shared[v0, v1, v2, v3, v4, v5])
T.block_attr({"meta_schedule.cooperative_fetch":4})
weight_shared[v0, v1, v2, v3, v4, v5] = weight[v0, v1, v2, v3, v4, v5]
for i6_1, i7_1, i8_1, i9_1, i0_3, i1_3, i2_3, i3_3, i4_3, i5_3, i6_2, i7_2, i8_2, i9_2, i0_4, i1_4, i2_4, i3_4, i4_4, i5_4 in T.grid(1, 1, 1, 4, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 8):
with T.block("conv2d_capsule_nhwijc"):
n = T.axis.spatial(1, i0_4 + i0_3)
h = T.axis.spatial(8, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused // 64 * 2 + i1_3 + i1_4)
w = T.axis.spatial(8, i2_3 + i2_4 + i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 64 // 8)
cap_i = T.axis.spatial(4, i3_3 + i3_4 + i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 8 // 4 * 2 + i0_2_i1_2_i2_2_i3_2_i4_2_i5_2_fused // 2)
cap_j = T.axis.spatial(4, i0_2_i1_2_i2_2_i3_2_i4_2_i5_2_fused % 2 * 2 + i4_3 * 2 + i4_4)
co = T.axis.spatial(32, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 4 * 8 + i5_3 * 8 + i5_4)
rh = T.axis.reduce(3, i6_1 + i6_2 + i6_0)
rw = T.axis.reduce(3, i7_0 + i7_1 + i7_2)
cap_k = T.axis.reduce(4, i8_0 * 2 + i8_1 * 2 + i8_2)
rc = T.axis.reduce(32, i9_0 * 4 + i9_1 + i9_2)
T.reads(PadInput_shared[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc], weight_shared[rh, rw, cap_k, cap_j, rc, co])
T.writes(conv2d_capsule_nhwijc_local[n, h, w, cap_i, cap_j, co])
T.block_attr({"meta_schedule.t |
hread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv2d_capsule_nhwijc_local[n, h, w, cap_i, cap_j, co] = T.float32(0)
conv2d_capsule_nhwijc_local[n, h, w, cap_i, cap_j, co] = conv2d_capsule_nhwijc_local[n, h, w, cap_i, cap_j, co] + PadInput_shared[n, h * 2 + rh, w * 2 + rw, cap_i, cap_k, rc] * weight_shared[rh, rw, cap_k, cap_j, rc, co]
for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 2, 1, 1, 2, 8):
with T.block("conv2d_capsule_nhwijc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(8, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused // 64 * 2 + ax1)
v2 = T.axis.spatial(8, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 64 // 8 + ax2)
v3 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 8 // 4 * 2 + i0_2_i1_2_i2_2_i3_2_i4_2_i5_2_fused // 2 + ax3)
v4 = T.axis.spatial(4, i0_2_i1_2_i2_2_i3_2_i4_2_i5_2_fused % 2 * 2 + ax4)
v5 = T.axis.spatial(32, i0_0_i1_0_i2_0_i3_0_i4_0_i5_0_fused % 4 * 8 + ax5)
T.reads(conv2d_capsule_nhwijc_local[v0, v1, v2, v3, v4, v5])
T.writes(conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5])
conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5] = conv2d_capsule_nhwijc_local[v0, v1, v2, v3, v4, v5]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [4, 1, 1, 2, 1]),
("SamplePerfectTile", [8, 1, 1, 1, 1]),
("SamplePerfectTile", [2, 1, 2, 1, 1]),
("SamplePerfectTile", [1, 1, 2, 1, 2]),
("SamplePerfectTile", [4, 1, 1, 1, 8]),
("SamplePerfectTile", [3, 1, 1]),
("SamplePerfectTile", [3, 1, 1]),
("SamplePerfectTile", [2, 1, 2]),
("SamplePerfectTile", [8, |
4, 1]),
("SampleCategorical", 1),
("SampleCategorical", 3),
("SampleCategorical", 2),
]
mod = create_te_workload("CAP", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[cap_0],
expected_decisions=[decision_0],
)
def test_cuda_dep():
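# DEP: depthwise conv2d (NHWC, 3x3 filter). There is no cross-channel
# reduction; each channel reduces only over the 3x3 window, and the whole
# padded input is staged by a single thread block (blockIdx extent 1).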
@T.prim_func
def dep_0(placeholder: T.Buffer[(1, 112, 112, 32), "float32"], placeholder_1: T.Buffer[(1, 3, 3, 32), "float32"], depth_conv2d_nhwc: T.Buffer[(1, 112, 112, 32), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
depth_conv2d_nhwc_local = T.alloc_buffer([1, 112, 112, 32], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 114, 114, 32], dtype="float32", scope="shared")
placeholder_shared = T.alloc_buffer([1, 3, 3, 32], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(1, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(8, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(14, thread="threadIdx.x"):
for i4_0, i5_0 in T.grid(1, 1):
for ax0_ax1_ax2_ax3_fused in T.serial(415872):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(114, ax0_ax1_ax2_ax3_fused // 3648)
v2 = T.axis.spatial(114, ax0_ax1_ax2_ax3_fused % 3648 // 32)
v3 = T.axis.spatial(32, ax0_ax1_ax2_ax3_fused % 32)
T.reads(placeholder[v0, v1 - 1, v2 - 1, v3])
T.writes(PadInput_shared[v0, v1, v2, v3]) |
T.block_attr({"meta_schedule.cooperative_fetch":3})
PadInput_shared[v0, v1, v2, v3] = T.if_then_else(1 <= v1 and v1 < 113 and 1 <= v2 and v2 < 113, placeholder[v0, v1 - 1, v2 - 1, v3], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_fused in T.serial(288):
with T.block("placeholder_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused // 96)
v2 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 96 // 32)
v3 = T.axis.spatial(32, ax0_ax1_ax2_ax3_fused % 32)
T.reads(placeholder_1[v0, v1, v2, v3])
T.writes(placeholder_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":3})
placeholder_shared[v0, v1, v2, v3] = placeholder_1[v0, v1, v2, v3]
for i4_1, i5_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i0_4, i1_4, i2_4, i3_4 in T.grid(3, 1, 1, 4, 16, 8, 1, 3, 1, 7, 1, 1):
with T.block("depth_conv2d_nhwc"):
n = T.axis.spatial(1, i0_4 + i0_3)
h = T.axis.spatial(112, i0_1_i1_1_i2_1_i3_1_fused // 2 * 28 + i1_3 * 7 + i1_4)
w = T.axis.spatial(112, i2_4 + i0_2_i1_2_i2_2_i3_2_fused // 2 * 16 + i2_3)
c = T.axis.spatial(32, i0_1_i1_1_i2_1_i3_1_fused % 2 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 2 * 8 + i3_3 + i3_4)
rh = T.axis.reduce(3, i4_2 + i4_0 * 3 + i4_1)
rw = T.axis.reduce(3, i5_0 * 3 + i5_1 * 3 + i5_2)
T.reads(PadInput_shared[n, h + rh, w + rw, c], placeholder_shared[0, rh, rw, c])
T.writes(depth_conv2d_nhwc_local[n, h, w, c])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
depth_conv2d_nhwc_local[n, h, w, c] = T.float32(0)
depth_conv2d_nhwc_local[n, h, w, c] = depth_conv2d_nhwc_local[n, h, w, c] + PadInput_shared[n, h + rh, w + rw, c] * placeholder_shared[0, rh, rw, c]
for ax0, ax1, ax2, ax3 in T.grid(1, 28, 16, 8):
with T.block("depth_conv2d_nhwc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(112, i0_1_i1_1_i2_1_i3_1_fused // 2 * 28 + ax1)
v2 = T.axis.spatial(112, i0_2_i1_2_i2_2_i3_2_fused // 2 * 16 + ax2)
v3 = T.axis.spatial(32, i0_1_i1_1_i2_1_i3_1_fused % 2 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 2 * 8 + ax3)
T.reads(depth_conv2d_nhwc_local[v0, v1, v2, v3])
T.writes(depth_conv2d_nhwc[v0, v1, v2, v3])
depth_conv2d_nhwc[v0, v1, v2, v3] = depth_conv2d_nhwc_local[v0, v1, v2, v3]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 4, 1, 4, 7]),
("SamplePerfectTile", [1, 1, 7, 16, 1]),
("SamplePerfectTile", [1, 2, 2, 8, 1]),
("SamplePerfectTile", [1, 3, 1]),
("SamplePerfectTile", [1, 1, 3]),
("SampleCategorical", 2),
("SampleCategorical", 2),
("SampleCategorical", 1),
]
mod = create_te_workload("DEP", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[dep_0],
expected_decisions=[decision_0],
)
def test_cuda_dil():
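# DIL: dilated conv2d (dilation 2, visible as the `rh * 2` / `rw * 2` terms in
# the PadInput indices). The sampled tiling leaves threadIdx at extent 1,
# which is still a valid point in the design space.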
@T.prim_func
def dil_0(inputs: T.Buffer[(1, 224, 224, 3), "float32"], weight: T.Buffer[(7, 7, 3, 64), "float32"], conv2d_nhwc: T.Buffer[(1, 109, 109, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":512})
conv2d_nhwc_local = T.alloc_buffer([1, 109, 109, 64], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 230, 230, 3], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([7, 7, 3, 64], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(218, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(109, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(1, thread="threadIdx.x"):
for i4_0, i5_0, i6_0 in T.grid(7, 7, 3):
for ax0_ax1_ax2_ax3_fused in T.serial(217):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(230, i0_0_i1_0_i2_0_i3_0_fused // 2 * 2 + i4_0 * 2)
v2 = T.axis.spatial(230, i5_0 * 2 + ax0_ax1_ax2_ax3_fused % 217)
v3 = T.axis.spatial(3, i6_0 + 0)
T.reads(inputs[v0, v1 - 3, v2 - 3, v3])
T.writes(PadInput_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":2})
PadInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, inputs[v0, v1 - 3, v2 - 3, v3], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_fused in T.serial(32):
with T.block("weight_shared"):
v0, v1, v2 = T.axis.remap("SSS", [i4_0, i5_0, i6_0]) |
v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 2 * 32 + ax0_ax1_ax2_ax3_fused)
T.reads(weight[v0, v1, v2, v3])
T.writes(weight_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":4})
weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 1, 1, 1, 1, 1, 8, 1, 1, 1, 1, 1, 1, 4):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, i0_3 + i0_4)
h = T.axis.spatial(109, i1_4 + i0_0_i1_0_i2_0_i3_0_fused // 2 + i1_3)
w = T.axis.spatial(109, i0_1_i1_1_i2_1_i3_1_fused + i2_3 + i2_4)
co = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 2 * 32 + i3_3 * 4 + i3_4)
rh = T.axis.reduce(7, i4_0 + i4_1 + i4_2)
rw = T.axis.reduce(7, i5_2 + i5_0 + i5_1)
rc = T.axis.reduce(3, i6_1 + i6_2 + i6_0)
T.reads(PadInput_shared[n, h * 2 + rh * 2, w * 2 + rw * 2, co // 64 * 3 + rc], weight_shared[rh, rw, rc, co])
T.writes(conv2d_nhwc_local[n, h, w, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv2d_nhwc_local[n, h, w, co] = T.float32(0)
conv2d_nhwc_local[n, h, w, co] = conv2d_nhwc_local[n, h, w, co] + PadInput_shared[n, h * 2 + rh * 2, w * 2 + rw * 2, co // 64 * 3 + rc] * weight_shared[rh, rw, rc, co]
for ax0, ax1, ax2, ax3 in T.grid(1, 1, 1, 32): |
with T.block("conv2d_nhwc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(109, i0_0_i1_0_i2_0_i3_0_fused // 2 + ax1)
v2 = T.axis.spatial(109, i0_1_i1_1_i2_1_i3_1_fused + ax2)
v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 2 * 32 + ax3)
T.reads(conv2d_nhwc_local[v0, v1, v2, v3])
T.writes(conv2d_nhwc[v0, v1, v2, v3])
conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [109, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 109, 1, 1, 1]),
("SamplePerfectTile", [2, 1, 1, 8, 4]),
("SamplePerfectTile", [7, 1, 1]),
("SamplePerfectTile", [7, 1, 1]),
("SamplePerfectTile", [3, 1, 1]),
("SampleCategorical", 1),
("SampleCategorical", 3),
("SampleCategorical", 3),
]
mod = create_te_workload("DIL", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[dil_0],
expected_decisions=[decision_0],
)
def test_cuda_gmm():
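# GMM: batched matmul Z[b, i, j] = sum_k X[b, i, k] * Y[b, k, j], with both
# operands staged in shared memory and the result accumulated in Z_local.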
@T.prim_func
def gmm_0(X: T.Buffer[(1, 128, 128), "float32"], Y: T.Buffer[(1, 128, 128), "float32"], Z: T.Buffer[(1, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":1024})
Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_fused in T.thread_binding(1, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_fused in T.thread_binding(32, thread="vthread.x"):
for i0_2_i1_2_i2_2_fused in T.thread_binding(2, thread="threadIdx.x"):
for i3_0 in T.serial(1):
for ax0_ax1_ax2_fused in T.serial(16384):
with T.block("X_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, ax0_ax1_ax2_fused // 128)
v2 = T.axis.spatial(128, ax0_ax1_ax2_fused % 128)
T.reads(X[v0, v1, v2])
T.writes(X_shared[v0, v1, v2])
T.block_attr({"meta_schedule.cooperative_fetch":2})
X_shared[v0, v1, v2] = X[v0, v1, v2]
for ax0_ax1_ax2_fused in T.serial(16384):
with T.block("Y_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(128, ax0_ax1_ax2_fused // 128)
v2 = T.axis.spatial(128, ax0_ax1_ax2_fused % 128)
T.reads(Y[v0, v1, v2])
T.writes(Y_shared[v0, v1, v2])
T.block_attr({"meta_schedule.cooperative_fetch":1})
Y_shared[v0, v1, v2] = Y[v0, v1, v2]
for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(32, 1, 2, 64, 4, 1, 2, 1):
with T.block("Z"):
b = T.axis.spatial(1, i0_4 + i0_3)
i = T.axis.spatial(128, i0_1_i1_1_i2_1_fused * 4 + i1_3 * 2 + i1_4)
j = T.axis.spatial(128, i2_4 + i0_2_i1_2_i2_2_fused * 64 + i2_3)
k = T.axis.reduce(128, i3_0 * 128 + i3_1 * 4 + i3_2) |
T.reads(X_shared[b, i, k], Y_shared[b, k, j])
T.writes(Z_local[b, i, j])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
Z_local[b, i, j] = T.float32(0)
Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
for ax0, ax1, ax2 in T.grid(1, 4, 64):
with T.block("Z_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(128, i0_1_i1_1_i2_1_fused * 4 + ax1)
v2 = T.axis.spatial(128, i0_2_i1_2_i2_2_fused * 64 + ax2)
T.reads(Z_local[v0, v1, v2])
T.writes(Z[v0, v1, v2])
Z[v0, v1, v2] = Z_local[v0, v1, v2]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 32, 1, 2, 2]),
("SamplePerfectTile", [1, 1, 2, 64, 1]),
("SamplePerfectTile", [1, 32, 4]),
("SampleCategorical", 1),
("SampleCategorical", 0),
("SampleCategorical", 4),
]
mod = create_te_workload("GMM", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[gmm_0],
expected_decisions=[decision_0],
)
def test_cuda_grp():
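# GRP: grouped conv2d (weight shape [3, 3, 16, 128], i.e. 16 input channels
# per group). The group structure shows up in the input-channel index of the
# PadInput reads in the expected sketch.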
@T.prim_func
def grp_0(inputs: T.Buffer[(1, 56, 56, 64), "float32"], weight: T.Buffer[(3, 3, 16, 128), "float32"], conv2d_nhwc: T.Buffer[(1, 28, 28, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16}) |
conv2d_nhwc_local = T.alloc_buffer([1, 28, 28, 128], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 58, 58, 64], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([3, 3, 16, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(2, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(1, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(112, thread="threadIdx.x"):
for i4_0, i5_0, i6_0 in T.grid(3, 3, 1):
for ax0_ax1_ax2_ax3_fused in T.serial(95040):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(58, i0_0_i1_0_i2_0_i3_0_fused * 28 + i4_0 + ax0_ax1_ax2_ax3_fused % 95040 // 3520)
v2 = T.axis.spatial(58, i5_0 + ax0_ax1_ax2_ax3_fused % 3520 // 64)
v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64)
T.reads(inputs[v0, v1 - 1, v2 - 1, v3])
T.writes(PadInput_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":2})
PadInput_shared[v0, v1, v2, v3] = T.if_then_else(1 <= v1 and v1 < 57 and 1 <= v2 and v2 < 57, inputs[v0, v1 - 1, v2 - 1, v3], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_fused in T.serial(2048):
with T.block("weight_shared"):
v0, v1 = T.axis.remap("SS", [i4_0, i5_0])
v2 = T.axis.spatial(16, ax0_ax1_ax2_ax3_fused // 128)
v3 = T.axis.spatial(128, ax0_ax1_ax2_ax3_fused % 128)
T.reads(weight[v0, v1, v2, v3]) |
T.writes(weight_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":1})
weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 1, 2, 1, 2, 1, 2, 1, 1, 8, 1, 7, 4, 4):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, i0_3 + i0_4)
h = T.axis.spatial(28, i0_0_i1_0_i2_0_i3_0_fused * 14 + i1_3 * 7 + i1_4)
w = T.axis.spatial(28, i0_2_i1_2_i2_2_i3_2_fused // 16 * 4 + i2_3 * 4 + i2_4)
co = T.axis.spatial(128, i0_2_i1_2_i2_2_i3_2_fused % 16 * 8 + i3_3 * 4 + i3_4)
rh = T.axis.reduce(3, i4_0 + i4_1 + i4_2)
rw = T.axis.reduce(3, i5_2 + i5_0 + i5_1)
rc = T.axis.reduce(16, i6_0 * 16 + i6_1 * 8 + i6_2)
T.reads(PadInput_shared[n, h * 2 + rh, w * 2 + rw, co // 32 * 16 + rc], weight_shared[rh, rw, rc, co])
T.writes(conv2d_nhwc_local[n, h, w, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv2d_nhwc_local[n, h, w, co] = T.float32(0)
conv2d_nhwc_local[n, h, w, co] = conv2d_nhwc_local[n, h, w, co] + PadInput_shared[n, h * 2 + rh, w * 2 + rw, co // 32 * 16 + rc] * weight_shared[rh, rw, rc, co]
for ax0, ax1, ax2, ax3 in T.grid(1, 14, 4, 8):
with T.block("conv2d_nhwc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(28, i0_0_i1_0_i2_0_i3_0_fused * 14 + ax1) |
v2 = T.axis.spatial(28, i0_2_i1_2_i2_2_i3_2_fused // 16 * 4 + ax2)
v3 = T.axis.spatial(128, i0_2_i1_2_i2_2_i3_2_fused % 16 * 8 + ax3)
T.reads(conv2d_nhwc_local[v0, v1, v2, v3])
T.writes(conv2d_nhwc[v0, v1, v2, v3])
conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [2, 1, 1, 2, 7]),
("SamplePerfectTile", [1, 1, 7, 1, 4]),
("SamplePerfectTile", [1, 1, 16, 2, 4]),
("SamplePerfectTile", [3, 1, 1]),
("SamplePerfectTile", [3, 1, 1]),
("SamplePerfectTile", [1, 2, 8]),
("SampleCategorical", 1),
("SampleCategorical", 0),
("SampleCategorical", 1),
]
mod = create_te_workload("GRP", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[grp_0],
expected_decisions=[decision_0],
)
def test_cuda_t2d():
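# T2D: transposed conv2d (stride 2). The stride-2 scatter appears as the
# `(h + rh) % 2 == 0 and (w + rw) % 2 == 0` guard in the accumulation, and the
# shared-memory fetch extent depends on the outer reduction index i4_0. Note
# that check_sketches is invoked with debug_mask=0 for this workload.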
@T.prim_func
def t2d_0(inputs: T.Buffer[(1, 4, 4, 512), "float32"], weight: T.Buffer[(4, 4, 512, 256), "float32"], conv2d_transpose_nhwc: T.Buffer[(1, 8, 8, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":64})
conv2d_transpose_nhwc_local = T.alloc_buffer([1, 8, 8, 256], dtype="float32", scope="local")
PadInput_shared = T.alloc_buffer([1, 6, 6, 512], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([4, 4, 512, 256], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(256, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(1, thread="threadIdx.x"):
for i4_0, i5_0, i6_0 in T.grid(4, 1, 16):
for ax0_ax1_ax2_ax3_fused in T.serial((i4_0 % 2 + 1) * 96):
with T.block("PadInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 64 + i4_0 // 2 + ax0_ax1_ax2_ax3_fused // 96)
v2 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused % 64 // 16 + ax0_ax1_ax2_ax3_fused % 96 // 32)
v3 = T.axis.spatial(512, i6_0 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(inputs[v0, v1 - 1, v2 - 1, v3])
T.writes(PadInput_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":2})
PadInput_shared[v0, v1, v2, v3] = T.if_then_else(1 <= v1 and v1 < 5 and 1 <= v2 and v2 < 5, inputs[v0, v1 - 1, v2 - 1, v3], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_fused in T.serial(2048):
with T.block("weight_shared"):
v0 = T.axis.spatial(4, i4_0 * -1 + 3)
v1 = T.axis.spatial(4, ax0_ax1_ax2_ax3_fused // 512)
v2 = T.axis.spatial(512, i6_0 * 32 + ax0_ax1_ax2_ax3_fused % 512 // 16)
v3 = T.axis.spatial(256, i0_0_i1_0_i2_0_i3_0_fused % 16 * 16 + ax0_ax1_ax2_ax3_fused % 16)
T.reads(weight[v0, v1, v2, v3])
T.writes(weight_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":4})
weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 1, 4, 1, 2, 1, 8, 1, 4, 8, 1, 1, 2, 1): |
with T.block("conv2d_transpose_nhwc"):
n = T.axis.spatial(1, i0_3 + i0_4)
h = T.axis.spatial(8, i1_4 + i0_0_i1_0_i2_0_i3_0_fused // 64 * 2 + i1_3)
w = T.axis.spatial(8, i0_0_i1_0_i2_0_i3_0_fused % 64 // 16 * 2 + i2_3 * 2 + i2_4)
co = T.axis.spatial(256, i3_4 + i0_0_i1_0_i2_0_i3_0_fused % 16 * 16 + i0_1_i1_1_i2_1_i3_1_fused * 8 + i3_3)
rh = T.axis.reduce(4, i4_0 + i4_1 + i4_2)
rw = T.axis.reduce(4, i5_0 * 4 + i5_1 * 4 + i5_2)
rc = T.axis.reduce(512, i6_0 * 32 + i6_1 * 8 + i6_2)
T.reads(PadInput_shared[n, (h + rh) // 2, (w + rw) // 2, rc], weight_shared[3 - rh, 3 - rw, rc, co])
T.writes(conv2d_transpose_nhwc_local[n, h, w, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
conv2d_transpose_nhwc_local[n, h, w, co] = T.float32(0)
conv2d_transpose_nhwc_local[n, h, w, co] = conv2d_transpose_nhwc_local[n, h, w, co] + T.if_then_else((h + rh) % 2 == 0 and (w + rw) % 2 == 0, PadInput_shared[n, (h + rh) // 2, (w + rw) // 2, rc] * weight_shared[3 - rh, 3 - rw, rc, co], T.float32(0), dtype="float32")
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 2, 8):
with T.block("conv2d_transpose_nhwc_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(8, i0_0_i1_0_i2_0_i3_0_fused // 64 * 2 + ax1)
v2 = T.axis.spatial(8, i0_0_i1_0_i2_0_i3_0_fused % 64 // 16 * 2 + ax2)
v3 = T.axis.spatial(256, i0_0_i1_0_i2_0_i3_0_fused % 16 * 16 + i0_1_i1_1_i2_1_i3_1_fused * 8 + ax3)
T.reads(conv2d_transpose_nhwc_local[v0, v1, v2, v3])
T.writes(conv2d_transpose_nhwc[v0, v1, v2, v3])
conv2d_transpose_nhwc[v0, v1, v2, v3] = conv2d_transpose_nhwc_local[v0, v1, v2, v3]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [4, 1, 1, 2, 1]),
("SamplePerfectTile", [4, 1, 1, 1, 2]),
("SamplePerfectTile", [16, 2, 1, 8, 1]),
("SamplePerfectTile", [4, 1, 1]),
("SamplePerfectTile", [1, 1, 4]),
("SamplePerfectTile", [16, 4, 8]),
("SampleCategorical", 1),
("SampleCategorical", 3),
("SampleCategorical", 2),
]
mod = create_te_workload("T2D", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[t2d_0],
expected_decisions=[decision_0],
debug_mask=0,
)
def test_cuda_nrm():
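# NRM: L2 norm of a 256x256 matrix. Two sketches are expected: nrm_0 keeps the
# reduction serial in a single thread, while nrm_1 applies a cross-thread
# reduction into a shared-scope buffer bound to threadIdx.x.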
@T.prim_func
def nrm_0(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[1, "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":512})
C = T.alloc_buffer([1], dtype="float32")
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
for i1, i2 in T.grid(256, 256):
with T.block("C"):
b = T.axis.spatial(1, 0)
i, j = T.axis.remap("RR", [i1, i2])
T.reads(A[b, i, j])
T.writes(C[b])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + A[b, i, j] * A[b, i, j]
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
with T.block("D"): |
b = T.axis.spatial(1, 0)
T.reads(C[b])
T.writes(D[b])
D[b] = T.sqrt(C[b], dtype="float32")
@T.prim_func
def nrm_1(A: T.Buffer[(1, 256, 256), "float32"], D: T.Buffer[1, "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":1024})
C_shared = T.alloc_buffer([1], dtype="float32", scope="shared")
for i0_0_fused in T.thread_binding(1, thread="blockIdx.x"):
for ax0, ax1_ax2_fused_0 in T.grid(1, 512):
for ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
with T.block("C"):
b = T.axis.spatial(1, ax0)
i = T.axis.reduce(256, (ax1_ax2_fused_0 * 128 + ax1_ax2_fused_1) // 256)
j = T.axis.reduce(256, (ax1_ax2_fused_0 * 128 + ax1_ax2_fused_1) % 256)
T.reads(A[b, i, j])
T.writes(C_shared[b])
with T.init():
C_shared[b] = T.float32(0)
C_shared[b] = C_shared[b] + A[b, i, j] * A[b, i, j]
for i0_1 in T.thread_binding(128, thread="threadIdx.x"):
with T.block("D"):
b = T.axis.spatial(1, i0_1)
T.where(T.Mul(0, 128) + i0_1 < 1)
T.reads(C_shared[b])
T.writes(D[b])
D[b] = T.sqrt(C_shared[b], dtype="float32")
decision_0 = [
("SampleCategorical", 3),
]
decision_1 = [
("SampleCategorical", 5),
("SampleCategorical", 4),
]
mod = create_te_workload("NRM", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[nrm_0, nrm_1],
expected_decisions=[decision_0, decision_1],
)
def test_cuda_sfm():
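# SFM: softmax over 256x256. Four sketches are expected, differing in which of
# the two reductions (row max and exp-sum) is lowered as a cross-thread
# reduction into a shared-scope buffer versus a plain serial reduction.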
@T.prim_func
def sfm_0(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":0})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0_fused_0 in T.thread_binding(2, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
for i1 in T.serial(256):
with T.block("T_softmax_maxelem"):
i0 = T.axis.spatial(256, i0_fused_0 * 128 + i0_fused_1)
k = T.axis.reduce(256, i1)
T.reads(A[i0, k])
T.writes(T_softmax_maxelem[i0])
with T.init():
T_softmax_maxelem[i0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0] = T.max(T_softmax_maxelem[i0], A[i0, k])
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
for i1 in T.serial(256):
with T.block("T_softmax_expsum"):
i0 = T.axis.spatial(256, i0_fused_0 * 256 + i0_fused_1)
k = T.axis.reduce(256, i1)
T.reads(A[i0, k], T_softmax_maxelem[i0])
T.writes(T_softmax_expsum[i0])
with T.init():
T_softmax_expsum[i0] = T.float32(0)
T_softmax_expsum[i0] = T_softmax_expsum[i0] + T.exp(A[i0, k] - T_softmax_maxelem[i0], dtype="float32")
for i0_i1_fused_0 in T.thread_binding(1024, thread="blockIdx.x"):
for i0_i1_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0 = T.axis.spatial(256, (i0_i1_fused_0 * 64 + i0_i1_fused_1) // 256)
i1 = T.axis.spatial(256, (i0_i1_fused_0 * 64 + i0_i1_fused_1) % 256)
T.reads(A[i0, i1], T_softmax_maxelem[i0], T_softmax_expsum[i0])
T.writes(T_softmax_norm[i0, i1])
T.block_attr({"axis":1})
T_softmax_norm[i0, i1] = T.exp(A[i0, i1] - T_softmax_maxelem[i0], dtype="float32") / T_softmax_expsum[i0]
@T.prim_func
def sfm_1(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0_fused in T.thread_binding(256, thread="blockIdx.x"):
for i1_0 in T.serial(64):
for i1_1 in T.thread_binding(4, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0 = T.axis.spatial(256, i0_fused)
k = T.axis.reduce(256, i1_0 * 4 + i1_1)
T.reads(A[i0, k])
T.writes(T_softmax_maxelem[i0])
with T.init():
T_softmax_maxelem[i0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0] = T.max(T_softmax_maxelem[i0], A[i0, k])
for i0_fused_0 in T.thread_binding(4, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
for i1 in T.serial(256):
with T.block("T_softmax_expsum"):
i0 = T.axis.spatial(256, i0_fused_0 * 64 + i0_fused_1)
k = T.axis.reduce(256, i1)
T.reads(A[i0, k], T_softmax_maxelem[i0])
T.writes(T_softmax_expsum[i0])
with T.init():
T_softmax_expsum[i0] = T.float32(0)
T_softmax_expsum[i0] = T_softmax_expsum[i0] + T.exp(A[i0, k] - T_softmax_maxelem[i0], dtype="float32")
for i0_i1_fused_0 in T.thread_binding(256, thread="blockIdx.x"):
for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0 = T.axis.spatial(256, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 256)
i1 = T.axis.spatial(256, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 256)
T.reads(A[i0, i1], T_softmax_maxelem[i0], T_softmax_expsum[i0])
T.writes(T_softmax_norm[i0, i1])
T.block_attr({"axis":1})
T_softmax_norm[i0, i1] = T.exp(A[i0, i1] - T_softmax_maxelem[i0], dtype="float32") / T_softmax_expsum[i0]
@T.prim_func
def sfm_2(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":512})
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0_fused_0 in T.thread_binding(8, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
for i1 in T.serial(256):
with T.block("T_softmax_maxelem"):
i0 = T.axis.spatial(256, i0_fused_0 * 32 + i0_fused_1)
k = T.axis.reduce(256, i1)
T.reads(A[i0, k])
T.writes(T_softmax_maxelem[i0])
with T.init():
T_softmax_maxelem[i0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem[i0] = T.max(T_softmax_maxelem[i0], A[i0, k])
for i0_fused in T.thread_binding(256, thread="blockIdx.x"):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0 = T.axis.spatial(256, i0_fused + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.where(ax1_0 * 512 + ax1_1 < 256)
T.reads(A[i0, k], T_softmax_maxelem[i0])
T.writes(T_softmax_expsum_shared[i0])
with T.init():
T_softmax_expsum_shared[i0] = T.float32(0)
T_softmax_expsum_shared[i0] = T_softmax_expsum_shared[i0] + T.exp(A[i0, k] - T_softmax_maxelem[i0], dtype="float32")
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0 = T.axis.spatial(256, i0_fused)
i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.where(i1_0 * 512 + i1_1 < 256)
T.reads(A[i0, i1], T_softmax_maxelem[i0], T_softmax_expsum_shared[i0])
T.writes(T_softmax_norm[i0, i1])
T.block_attr({"axis":1})
T_softmax_norm[i0, i1] = T.exp(A[i0, i1] - T_softmax_maxelem[i0], dtype="float32") / T_softmax_expsum_shared[i0]
@T.prim_func
def sfm_3(A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":0})
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0_fused in T.thread_binding(256, thread="blockIdx.x"):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0 = T.axis.spatial(256, i0_fused + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.where(ax1_0 * 512 + ax1_1 < 256)
T.reads(A[i0, k])
T.writes(T_softmax_maxelem_shared[i0])
with T.init():
T_softmax_maxelem_shared[i0] = T.float32(-3.4028234663852886e+38)
T_softmax_maxelem_shared[i0] = T.max(T_softmax_maxelem_shared[i0], A[i0, k])
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0 = T.axis.spatial(256, i0_fused + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.where(ax1_0 * 512 + ax1_1 < 256)
T.reads(A[i0, k], T_softmax_maxelem_shared[i0])
T.writes(T_softmax_expsum_shared[i0])
with T.init(): |
T_softmax_expsum_shared[i0] = T.float32(0)
T_softmax_expsum_shared[i0] = T_softmax_expsum_shared[i0] + T.exp(A[i0, k] - T_softmax_maxelem_shared[i0], dtype="float32")
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0 = T.axis.spatial(256, i0_fused)
i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.where(i1_0 * 512 + i1_1 < 256)
T.reads(A[i0, i1], T_softmax_maxelem_shared[i0], T_softmax_expsum_shared[i0])
T.writes(T_softmax_norm[i0, i1])
T.block_attr({"axis":1})
T_softmax_norm[i0, i1] = T.exp(A[i0, i1] - T_softmax_maxelem_shared[i0], dtype="float32") / T_softmax_expsum_shared[i0]
decision_0 = [
("SampleCategorical", 0),
("SampleCategorical", 1),
("SampleCategorical", 3),
("SampleCategorical", 2),
]
decision_1 = [
("SampleCategorical", 0),
("SampleCategorical", 1),
("SampleCategorical", 3),
("SampleCategorical", 1),
]
decision_2 = [
("SampleCategorical", 7),
("SampleCategorical", 3),
("SampleCategorical", 0),
]
decision_3 = [
("SampleCategorical", 7),
("SampleCategorical", 0),
("SampleCategorical", 0),
]
mod = create_te_workload("SFM", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[sfm_0, sfm_1, sfm_2, sfm_3],
expected_decisions=[decision_0, decision_1, decision_2, decision_3],
)
def test_cuda_cbr():
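# CBR: conv2d fused with bias add, batch-norm scale/offset, and ReLU; the
# whole epilogue is applied in the write-back block of the local accumulator.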
@T.prim_func
def cbr_0(data: T.Buffer[(1, 224, 224, 3), "float32"], kernel: T.Buffer[(7, 7, 3, 64), "float32"], bias: T.Buffer[64, "float32"], bn_offset: T.Buffer[64, "float32"], bn_scale: T.Buffer[64, "float32"], compute: T.Buffer[(1, 112, 112, 64), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":512})
Conv2dOutput_local = T.alloc_buffer([1, 112, 112, 64], dtype="float32", scope="local")
PaddedInput_shared = T.alloc_buffer([1, 230, 230, 3], dtype="float32", scope="shared")
kernel_shared = T.alloc_buffer([7, 7, 3, 64], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(14, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(4, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(128, thread="threadIdx.x"):
for i4_0, i5_0, i6_0 in T.grid(7, 1, 3):
for ax0_ax1_ax2_ax3_fused in T.serial(8251):
with T.block("PaddedInput_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(230, ax0_ax1_ax2_ax3_fused // 37 + i4_0)
v2 = T.axis.spatial(230, i0_0_i1_0_i2_0_i3_0_fused // 2 * 32 + ax0_ax1_ax2_ax3_fused % 37)
v3 = T.axis.spatial(3, i6_0)
T.reads(data[v0, v1 - 3, v2 - 3, v3])
T.writes(PaddedInput_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":1})
PaddedInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, data[v0, v1 - 3, v2 - 3, v3], T.float32(0), dtype="float32")
for ax0_ax1_ax2_ax3_fused in T.serial(224):
with T.block("kernel_shared"):
v0 = T.axis.spatial(7, i4_0)
v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused // 32)
v2 = T.axis.spatial(3, i6_0)
v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 2 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(kernel[v0, v1, v2, v3])
T.writes(kernel_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":1})
kernel_shared[v0, v1, v2, v3] = kernel[v0, v1, v2, v3]
for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 1, 1, 1, 1, 1, 2, 1, 7, 1, 1, 7, 1, 8):
with T.block("Conv2dOutput"):
nn = T.axis.spatial(1, i0_3 + i0_4)
yy = T.axis.spatial(112, i0_1_i1_1_i2_1_i3_1_fused // 2 * 56 + i0_2_i1_2_i2_2_i3_2_fused // 16 * 7 + i1_3 * 7 + i1_4)
xx = T.axis.spatial(112, i2_4 + i0_0_i1_0_i2_0_i3_0_fused // 2 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 16 + i2_3)
ff = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 2 * 32 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 16 + i3_3 * 8 + i3_4)
ry = T.axis.reduce(7, i4_0 + i4_1 + i4_2)
rx = T.axis.reduce(7, i5_0 * 7 + i5_1 * 7 + i5_2)
rc = T.axis.reduce(3, i6_1 + i6_2 + i6_0)
T.reads(PaddedInput_shared[nn, yy * 2 + ry, xx * 2 + rx, rc], kernel_shared[ry, rx, rc, ff])
T.writes(Conv2dOutput_local[nn, yy, xx, ff])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
Conv2dOutput_local[nn, yy, xx, ff] = T.float32(0)
Conv2dOutput_local[nn, yy, xx, ff] = Conv2dOutput_local[nn, yy, xx, ff] + PaddedInput_shared[nn, yy * 2 + ry, xx * 2 + rx, rc] * kernel_shared[ry, rx, rc, ff]
for ax0, ax1, ax2, ax3 in T.grid(1, 7, 1, 16):
with T.block("Conv2dOutput_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(112, i0_1_i1_1_i2_1_i3_1_fused // 2 * 56 + i0_2_i1_2_i2_2_i3_2_fused // 16 * 7 + ax1)
v2 = T.axis.spatial(112, i0_0_i1_0_i2_0_i3_0_fused // 2 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 16 + ax2)
v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 2 * 32 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 16 + ax3)
T.reads(Conv2dOutput_local[v0, v1, v2, v3], bias[v3], bn_scale[v3], bn_offset[v3])
T.writes(compute[v0, v1, v2, v3])
compute[v0, v1, v2, v3] = T.max((Conv2dOutput_local[v0, v1, v2, v3] + bias[v3]) * bn_scale[v3] + bn_offset[v3], T.float32(0))
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 2, 8, 1, 7]),
("SamplePerfectTile", [7, 1, 16, 1, 1]),
("SamplePerfectTile", [2, 2, 1, 2, 8]),
("SamplePerfectTile", [7, 1, 1]),
("SamplePerfectTile", [1, 1, 7]),
("SamplePerfectTile", [3, 1, 1]),
("SampleCategorical", 0),
("SampleCategorical", 0),
("SampleCategorical", 3),
]
mod = create_te_workload("CBR", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[cbr_0],
expected_decisions=[decision_0],
)
def test_cuda_tbg():
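# TBG: transposed batch matmul (attention-score pattern). Both query and value
# are transposed on the fly into shared-memory buffers before the batched
# matmul over the fused head/sequence axes.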
@T.prim_func
def tbg_0(query: T.Buffer[(1, 128, 12, 64), "float32"], value: T.Buffer[(1, 128, 12, 64), "float32"], C: T.Buffer[(1, 12, 128, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":1024})
C_local = T.alloc_buffer([1, 12, 128, 128], dtype="float32", scope="local")
query_T_shared = T.alloc_buffer([1, 12, 128, 64], dtype="float32", scope="shared")
value_T_shared = T.alloc_buffer([1, 12, 64, 128], dtype="float32", scope="shared")
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(4, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(192, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(32, thread="threadIdx.x"):
for i4_0 in T.serial(8):
for ax0_ax1_ax2_ax3_fused in T.serial(12288):
with T.block("query_T_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(12, ax0_ax1_ax2_ax3_fused // 1024)
v2 = T.axis.spatial(128, ax0_ax1_ax2_ax3_fused % 1024 // 8)
v3 = T.axis.spatial(64, i4_0 * 8 + ax0_ax1_ax2_ax3_fused % 8)
T.reads(query[v0, v2, v1, v3])
T.writes(query_T_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":3})
query_T_shared[v0, v1, v2, v3] = query[v0, v2, v1, v3]
for ax0_ax1_ax2_ax3_fused in T.serial(3072):
with T.block("value_T_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(12, ax0_ax1_ax2_ax3_fused // 256)
v2 = T.axis.spatial(64, i4_0 * 8 + ax0_ax1_ax2_ax3_fused % 256 // 32)
v3 = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_fused * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(value[v0, v3, v1, v2])
T.writes(value_T_shared[v0, v1, v2, v3]) |
T.block_attr({"meta_schedule.cooperative_fetch":4})
value_T_shared[v0, v1, v2, v3] = value[v0, v3, v1, v2]
for i4_1, i0_3, i1_3, i2_3, i3_3, i4_2, i0_4, i1_4, i2_4, i3_4 in T.grid(4, 1, 2, 1, 1, 2, 1, 1, 4, 1):
with T.block("C"):
b = T.axis.spatial(1, i0_4 + i0_3)
h = T.axis.spatial(12, i1_4 + i0_1_i1_1_i2_1_i3_1_fused // 32 * 2 + i1_3)
i = T.axis.spatial(128, i0_1_i1_1_i2_1_i3_1_fused % 32 // 8 * 32 + i0_2_i1_2_i2_2_i3_2_fused // 4 * 4 + i2_3 * 4 + i2_4)
j = T.axis.spatial(128, i3_4 + i0_0_i1_0_i2_0_i3_0_fused * 32 + i0_1_i1_1_i2_1_i3_1_fused % 8 * 4 + i0_2_i1_2_i2_2_i3_2_fused % 4 + i3_3)
k = T.axis.reduce(64, i4_0 * 8 + i4_1 * 2 + i4_2)
T.reads(query_T_shared[b, h, i, k], value_T_shared[b, h, k, j])
T.writes(C_local[b, h, i, j])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
with T.init():
C_local[b, h, i, j] = T.float32(0)
C_local[b, h, i, j] = C_local[b, h, i, j] + query_T_shared[b, h, i, k] * value_T_shared[b, h, k, j]
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 4, 1):
with T.block("C_local"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(12, i0_1_i1_1_i2_1_i3_1_fused // 32 * 2 + ax1)
v2 = T.axis.spatial(128, i0_1_i1_1_i2_1_i3_1_fused % 32 // 8 * 32 + i0_2_i1_2_i2_2_i3_2_fused // 4 * 4 + ax2)
v3 = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_fused * 32 + i0_1_i1_1_i2_1_i3_1_fused % 8 * 4 + i0_2_i1_2_i2_2_i3_2_fused % 4 + ax3)
T.reads(C_local[v0, v1, v2, v3])
T.writes(C[v0, v1, v2, v3])
C[v0, v1, v2, v3] = C_local[v0, v1, v2, v3]
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1, 1]),
("SamplePerfectTile", [1, 6, 1, 2, 1]),
("SamplePerfectTile", [1, 4, 8, 1, 4]),
("SamplePerfectTile", [4, 8, 4, 1, 1]),
("SamplePerfectTile", [8, 4, 2]),
("SampleCategorical", 2),
("SampleCategorical", 3),
("SampleCategorical", 4),
]
mod = create_te_workload("TBG", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[tbg_0],
expected_decisions=[decision_0],
)
if __name__ == "__main__":
test_cuda_c1d()
test_cuda_c2d()
test_cuda_c3d()
test_cuda_cap()
test_cuda_dep()
test_cuda_dil()
test_cuda_gmm()
test_cuda_grp()
test_cuda_t2d()
test_cuda_nrm()
test_cuda_sfm()
test_cuda_cbr()
test_cuda_tbg() |
"""Tests for MetaSchedule search space on CUDA"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target
def _target():
return Target("nvidia/geforce-rtx-3070")
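# The design spaces checked below are generated from the CUDA schedule rules
# only (types=ms.ScheduleRule), so the expected sketches reflect the schedule
# rules alone, before any postprocessing.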
def _design_space(mod):
return generate_design_space(
kind="cuda",
mod=mod,
target=_target(),
types=ms.ScheduleRule,
)
def test_cuda_nhwc():
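# NHWC winograd conv2d (4x4 output tile, 6x6 transform). The sketch keeps the
# four winograd stages (input_tile, data_pack, bgemm, inverse) as separate
# kernels; data_pack and inverse unroll the transform loops, while bgemm gets
# the usual SSSRRSRS tiling with shared-memory staging.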
@T.prim_func
def cuda_nhwc_0(data: T.Buffer[(1, 14, 14, 128), "float32"], weight: T.Buffer[(6, 6, 128, 128), "float32"], conv2d_winograd: T.Buffer[(1, 12, 12, 128), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
input_tile_local = T.alloc_buffer([6, 6, 9, 128], dtype="float32", scope="local")
data_pack = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
bgemm = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
inverse = T.alloc_buffer([4, 4, 9, 128], dtype="float32")
data_pack_local = T.alloc_buffer([6, 6, 9, 128], dtype="float32", scope="local")
bgemm_local = T.alloc_buffer([6, 6, 9, 128], dtype="float32", scope="local")
data_pack_shared = T.alloc_buffer([6, 6, 9, 128], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([6, 6, 128, 128], dtype="float32", scope="shared")
for i2_0_i3_0_i2_1_i3_1_fused_0 in T.thread_binding(2, thread="blockIdx.x"):
for i2_0_i3_0_i2_1_i3_1_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 1):
with T.block("input_tile"):
T.where(i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1 < 1152)
eps, nu = T.axis.remap("SS", [ax0, ax1])
p = T.axis.spatial(9, (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) // 384 * 3 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 12 // 4)
ci = T.axis.spatial(128, (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 384 // 12 * 4 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 4)
T.reads(data[p // 9, p % 9 // 3 * 4 + eps, p % 3 * 4 + nu, ci])
T.writes(input_tile_local[eps, nu, p, ci])
T.block_attr({"schedule_rule":"None"})
input_tile_local[eps, nu, p, ci] = T.if_then_else(0 <= p % 9
for i0 in T.unroll(6):
for i1 in T.unroll(6):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("data_pack"):
T.where(i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1 < 1152)
eps, nu = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(9, (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) // 384 * 3 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 12 // 4)
ci = T.axis.spatial(128, (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 384 // 12 * 4 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 4)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(input_tile_local[r_a, r_b, p, ci])
T.writes(data_pack_local[eps, nu, p, ci])
T.block_attr({"schedule_rule":"conv2d_nhwc_winograd_data_pack"})
with T.init():
data_pack_local[eps, nu, p, ci] = T.float32(0)
data_pack_local[eps, nu, p, ci] = data_pack_local[eps, nu, p, ci] + input_tile_local[r_a, r_b, p, ci] * T.Select(r_a % 6 == 5 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 5 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 0, T.float32(0), T.Select(r_a % 6 == 4 and eps % 6 == 5, T.float32(1.5), T.Select(r_a % 6 == 4 and eps % 6 == 4, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 3, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 2, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 0, T.float32(1), T.Select(r_a % 6 == 3 and eps % 6 == 5, T.float32(-2), T.Select(r_a % 6 == 3 and eps % 6 == 4, T.float32(-0.5), T.Select(r_a % 6 == 3 and eps % 6 == 3, T.float32(2), T.Select(r_a % 6 == 3 and eps % 6 == 2, T.float32(2.5), T.Select(r_a % 6 == 3 and eps % 6 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and eps % 6 == 0, T.float32(1.5), T.Select(r_a % 6 == 2 and eps % 6 == 5, T.float32(-1.5), T.Select(r_a % 6 == 2 and eps % 6 == 4, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 3, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 2, T.float32(0.5), T.Select(r_a % 6 == 2 and eps % 6 == 1, T.float32(-2.5), T.Select(r_a % 6 == 2 and eps % 6 == 0, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 4, T.float32(0.5), T.Select(r_a % 6 == 1 and eps % 6 == 3, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 2, T.float32(-1), T.Select(r_a % 6 == 1 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 0, T.float32(-1.5), T.Select(r_a % 6 == 0 and eps % 6 == 5, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 5 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 0, T.float32(0), T.Select(r_b % 6 == 4 and nu % 6 == 5, T.float32(1.5), T.Select(r_b % 6 == 4 and nu % 6 == 4, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 3, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 2, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 0, T.float32(1), T.Select(r_b % 6 == 3 and nu % 6 == 5, T.float32(-2), T.Select(r_b % 6 == 3 and nu % 6 == 4, T.float32(-0.5), T.Select(r_b % 6 == 3 and nu % 6 == 3, T.float32(2), T.Select(r_b % 6 == 3 and nu % 6 == 2, T.float32(2.5), T.Select(r_b % 6 == 3 and nu % 6 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and nu % 6 == 0, T.float32(1.5), T.Select(r_b % 6 == 2 and nu % 6 == 5, T.float32(-1.5), T.Select(r_b % 6 == 2 and nu % 6 == 4, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 3, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 2, T.float32(0.5), T.Select(r_b % 6 == 2 and nu % 6 == 1, T.float32(-2.5), T.Select(r_b % 6 == 2 and nu % 6 == 0, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 4, T.float32(0.5), T.Select(r_b % 6 == 1 and nu % 6 == 3, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 2, T.float32(-1), T.Select(r_b % 6 == 1 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 0, T.float32(-1.5), T.Select(r_b % 6 == 0 and nu % 6 == 5, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 1):
with T.block("data_pack_local"):
T.where(i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1 < 1152)
v0, v1 = T.axis.remap("SS", [ax0, ax1])
v2 = T.axis.spatial(9, (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) // 384 * 3 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 12 // 4)
v3 = T.axis.spatial(128, (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 384 // 12 * 4 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1) % 4)
T.reads(data_pack_local[v0, v1, v2, v3])
T.writes(data_pack[v0, v1, v2, v3])
data_pack[v0, v1, v2, v3] = data_pack_local[v0, v1, v2, v3]
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(96, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(4, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(27, thread="threadIdx.x"):
for i4_0 in T.serial(8):
for ax0_ax1_ax2_ax3_fused in T.serial(1728):
with T.block("data_pack_shared"):
v0 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 32 * 2 + ax0_ax1_ax2_ax3_fused // 864)
v1 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused % 864 // 144)
v2 = T.axis.spatial(9, ax0_ax1_ax2_ax3_fused % 144 // 16)
v3 = T.axis.spatial(128, i4_0 * 16 + ax0_ax1_ax2_ax3_fused % 16)
T.reads(data_pack[v0, v1, v2, v3])
T.writes(data_pack_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":1})
data_pack_shared[v0, v1, v2, v3] = data_pack[v0, v1, v2, v3]
for ax0_ax1_ax2_ax3_fused in T.serial(768):
with T.block("weight_shared"):
v0 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 32 * 2 + ax0_ax1_ax2_ax3_fused // 384)
v1 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused % 384 // 64)
v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_fused % 32 * 4 + ax0_ax1_ax2_ax3_fused % 64 // 16)
v3 = T.axis.spatial(128, i4_0 * 16 + ax0_ax1_ax2_ax3_fused % 16) |
T.reads(weight[v0, v1, v2, v3])
T.writes(weight_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch":3})
weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
for i4_1, i0_3, i1_3, i2_3, i3_3, i4_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 2, 1, 1, 2, 16, 1, 1, 1, 1):
with T.block("bgemm"):
eps = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 32 * 2 + i0_3 + i0_4)
nu = T.axis.spatial(6, i1_3 + i1_4 + i0_1_i1_1_i2_1_i3_1_fused // 2 * 3 + i0_2_i1_2_i2_2_i3_2_fused // 9)
p = T.axis.spatial(9, i0_2_i1_2_i2_2_i3_2_fused % 9 + i2_3 + i2_4)
co = T.axis.spatial(128, i3_4 + i0_0_i1_0_i2_0_i3_0_fused % 32 * 4 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 2 + i3_3)
ci = T.axis.reduce(128, i4_0 * 16 + i4_1 * 16 + i4_2)
T.reads(data_pack_shared[eps, nu, p, ci], weight_shared[eps, nu, co, ci])
T.writes(bgemm_local[eps, nu, p, co])
T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS", "meta_schedule.write_cache_level":[3]})
with T.init():
bgemm_local[eps, nu, p, co] = T.float32(0)
bgemm_local[eps, nu, p, co] = bgemm_local[eps, nu, p, co] + data_pack_shared[eps, nu, p, ci] * weight_shared[eps, nu, co, ci]
for ax0, ax1, ax2, ax3 in T.grid(2, 1, 1, 2):
with T.block("bgemm_local"):
v0 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 32 * 2 + ax0)
v1 = T.axis.spatial(6, i0_1_i1_1_i2_1_i3_1_fused // 2 * 3 + i0_2_i1_2_i2_2_i3_2_fused // 9 + ax1)
v2 = T.axis.spatial(9, i0_2_i1_2_i2_2_i3_2_fused % 9 + ax2)
v3 = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_fused % 32 * 4 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 2 + ax3)
T.reads(bgemm_local[v0, v1, v2, v3])
T.writes(bgemm[v0, v1, v2, v3])
bgemm[v0, v1, v2, v3] = bgemm_local[v0, v1, v2, v3]
for i2_0_i3_0_i2_1_i3_1_fused_0 in T.thread_binding(18, thread="blockIdx.x"):
for i2_0_i3_0_i2_1_i3_1_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
for i0 in T.unroll(4):
for i1 in T.unroll(4):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("inverse"):
vh, vw = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(9, (i2_0_i3_0_i2_1_i3_1_fused_0 * 64 + i2_0_i3_0_i2_1_i3_1_fused_1) // 384 * 3 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 64 + i2_0_i3_0_i2_1_i3_1_fused_1) % 24 // 8)
co = T.axis.spatial(128, (i2_0_i3_0_i2_1_i3_1_fused_0 * 64 + i2_0_i3_0_i2_1_i3_1_fused_1) % 384 // 24 * 8 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 64 + i2_0_i3_0_i2_1_i3_1_fused_1) % 8)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(bgemm[r_a, r_b, p, co])
T.writes(inverse[vh, vw, p, co])
T.block_attr({"schedule_rule":"conv2d_nhwc_winograd_inverse"})
with T.init():
inverse[vh, vw, p, co] = T.float32(0)
inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co] * T.Select(r_a % 6 == 5 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 5 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 0, T.float32(0), T.Select(r_a % 6 == 4 and vh % 4 == 3, T.float32(-8), T.Select(r_a % 6 == 4 and vh % 4 == 2, T.float32(4), T.Select(r_a % 6 == 4 and vh % 4 == 1, T.float32(-2), T.Select(r_a % 6 == 4 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 3 and vh % 4 == 3, T.float32(0.125), T.Select(r_a % 6 == 3 and vh % 4 == 2, T.float32(0.25), T.Select(r_a % 6 == 3 and vh % 4 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 1, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 3, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 1, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 0 and vh % 4 == 3, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 5 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 0, T.float32(0), T.Select(r_b % 6 == 4 and vw % 4 == 3, T.float32(-8), T.Select(r_b % 6 == 4 and vw % 4 == 2, T.float32(4), T.Select(r_b % 6 == 4 and vw % 4 == 1, T.float32(-2), T.Select(r_b % 6 == 4 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 3 and vw % 4 == 3, T.float32(0.125), T.Select(r_b % 6 == 3 and vw % 4 == 2, T.float32(0.25), T.Select(r_b % 6 == 3 and vw % 4 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 1, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 3, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 1, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 0 and vw % 4 == 3, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
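# The two Select chains above are the inverse transform matrix A of Winograd
# F(4x4, 3x3), fully inlined: one factor indexed by (r_a, vh), the other by
# (r_b, vw). Collecting the coefficients gives the 6x4 matrix below; a sketch
# (NumPy assumed, standalone) confirming it is the Vandermonde matrix of the
# interpolation points [0, -1, 1, 0.5, -2] plus the row for infinity:
import numpy as np

A = np.array([
    [1.0,  0.0,  0.0,   0.0],    # r_a == 0
    [1.0, -1.0,  1.0,  -1.0],    # r_a == 1
    [1.0,  1.0,  1.0,   1.0],    # r_a == 2
    [1.0,  0.5,  0.25,  0.125],  # r_a == 3
    [1.0, -2.0,  4.0,  -8.0],    # r_a == 4
    [0.0,  0.0,  0.0,   1.0],    # r_a == 5
])
points = [0.0, -1.0, 1.0, 0.5, -2.0]
vandermonde = np.array([[p ** j for j in range(4)] for p in points])
assert np.allclose(A[:5], vandermonde)
# inverse[vh, vw] = sum over r_a, r_b of A[r_a, vh] * bgemm[r_a, r_b] * A[r_b, vw],
# i.e. inverse = A.T @ bgemm_tile @ A for each (p, co).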
for i0_i1_i2_i3_fused_0 in T.thread_binding(144, thread="blockIdx.x"):
for i0_i1_i2_i3_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
with T.block("conv2d_winograd"):
n = T.axis.spatial(1, 0)
h = T.axis.spatial(12, (i0_i1_i2_i3_fused_0 * 128 + i0_i1_i2_i3_fused_1) // 1536)
w = T.axis.spatial(12, (i0_i1_i2_i3_fused_0 * 128 + i0_i1_i2_i3_fused_1) % 1536 // 128)
co = T.axis.spatial(128, (i0_i1_i2_i3_fused_0 * 128 + i0_i1_i2_i3_fused_1) % 128)
T.reads(inverse[h % 4, w % 4, n * 9 + h // 4 * 3 + w // 4, co])
T.writes(conv2d_winograd[n, h, w, co])
conv2d_winograd[n, h, w, co] = inverse[h % 4, w % 4, n * 9 + h // 4 * 3 + w // 4, co]
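# The final block scatters each 4x4 inverse tile into the 12x12 output: tile
# row h // 4, tile column w // 4 (3x3 tiles), intra-tile offset (h % 4, w % 4).
# A standalone round-trip check (plain Python) of that mapping:
for h in range(12):
    for w in range(12):
        p = h // 4 * 3 + w // 4
        assert (p // 3 * 4 + h % 4, p % 3 * 4 + w % 4) == (h, w)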
decision_0 = [
("SamplePerfectTile", [3, 3]),
("SamplePerfectTile", [16, 8]),
("SampleCategorical", 1),
("SamplePerfectTile", [3, 3]),
("SamplePerfectTile", [16, 8]),
("SampleCategorical", 5),
("SamplePerfectTile", [3, 1, 1, 2, 1]),
("SamplePerfectTile", [1, 2, 3, 1, 1]),
("SamplePerfectTile", [1, 1, 9, 1, 1]),
("SamplePerfectTile", [32, 2, 1, 2, 1]),
("SamplePerfectTile", [8, 1, 16]),
("SampleCategorical", 0),
("SampleCategorical", 2),
("SampleCategorical", 1),
("SampleCategorical", 2),
]
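# Each SamplePerfectTile entry above records the sampled factors of one loop
# split (their product must equal the loop extent), and each SampleCategorical
# records an index into a fixed candidate list (e.g. the unroll depths behind
# meta_schedule.unroll_explicit). A minimal standalone check of the
# perfect-tile invariant for the decisions used here:
for factors, extent in [((3, 3), 9), ((16, 8), 128), ((3, 1, 1, 2, 1), 6),
                        ((1, 2, 3, 1, 1), 6), ((1, 1, 9, 1, 1), 9),
                        ((32, 2, 1, 2, 1), 128), ((8, 1, 16), 128)]:
    product = 1
    for f in factors:
        product *= f
    assert product == extent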
with _target():
mod = create_te_workload("C2D_WIN_NHWC", 0)
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[cuda_nhwc_0],
expected_decisions=[decision_0],
)
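# check_sketches (a helper defined earlier in this test suite) pairs each
# generated sketch with an expected TVMScript module and its sampled decisions.
# Conceptually, the module comparison boils down to structural equality; a
# rough standalone sketch of that step (modules_match is a hypothetical name):
import tvm

def modules_match(actual_mod, expected_func):
    # tvm.ir.structural_equal compares two IR nodes structurally.
    return tvm.ir.structural_equal(actual_mod["main"], expected_func)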
def test_cuda_nchw(): |
@T.prim_func
def cuda_nchw_0(data: T.Buffer[(1, 64, 56, 56), "float32"], weight: T.Buffer[(6, 6, 64, 64), "float32"], conv2d_winograd: T.Buffer[(1, 64, 56, 56), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":16})
input_tile_local = T.alloc_buffer([64, 196, 6, 6], dtype="float32", scope="local")
data_pack = T.alloc_buffer([6, 6, 64, 196], dtype="float32")
bgemm = T.alloc_buffer([6, 6, 64, 196], dtype="float32")
inverse_local = T.alloc_buffer([64, 196, 4, 4], dtype="float32", scope="local")
data_pack_local = T.alloc_buffer([6, 6, 64, 196], dtype="float32", scope="local")
bgemm_local = T.alloc_buffer([6, 6, 64, 196], dtype="float32", scope="local")
data_pack_shared = T.alloc_buffer([6, 6, 64, 196], dtype="float32", scope="shared")
weight_shared = T.alloc_buffer([6, 6, 64, 64], dtype="float32", scope="shared")
for i2_i3_fused_0 in T.thread_binding(25, thread="blockIdx.x"):
for i2_i3_fused_1 in T.thread_binding(512, thread="threadIdx.x"):
for ax0, ax1, ax2, ax3 in T.grid(1, 1, 6, 6):
with T.block("input_tile"):
T.where(i2_i3_fused_0 * 512 + i2_i3_fused_1 < 12544)
ci = T.axis.spatial(64, (i2_i3_fused_0 * 512 + i2_i3_fused_1) // 196 + ax0)
p = T.axis.spatial(196, (i2_i3_fused_0 * 120 + i2_i3_fused_1) % 196 + ax1)
eps, nu = T.axis.remap("SS", [ax2, ax3])
T.reads(data[p // 196, ci, p % 196 // 14 * 4 + eps - 1, p % 14 * 4 + nu - 1])
T.writes(input_tile_local[ci, p, eps, nu])
T.block_attr({"schedule_rule":"None"})
input_tile_local[ci, p, eps, nu] = T.if_then_else(1 <= p % 196 // 14 * 4 + eps and p % 196 // 14 * 4 + eps < 57 and 1 <= p % 14 * 4 + nu and p % 14 * 4 + nu < 57, data[p // 196, ci, p % 196 // 14 * 4 + eps - 1, p % 14 * 4 + nu - 1], T.float32(0), dtype="float32")
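# The guard reconstructed above implements the implicit padding of 1 around the
# 56x56 feature map: out-of-range taps are zero-filled. A standalone check
# (plain Python) that every tap of every 6x6 tile stays inside [-1, 56]:
for p in range(196):
    for eps in range(6):
        for nu in range(6):
            hh = p % 196 // 14 * 4 + eps - 1
            ww = p % 14 * 4 + nu - 1
            assert -1 <= hh <= 56 and -1 <= ww <= 56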
for i0 in T.unroll(6):
for i1 in T.unroll(6):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("data_pack"):
T.where(i2_i3_fused_0 * 512 + i2_i3_fused_1 < 12544)
eps, nu = T.axis.remap("SS", [i0, i1])
ci = T.axis.spatial(64, (i2_i3_fused_0 * 512 + i2_i3_fused_1) // 196)
p = T.axis.spatial(196, (i2_i3_fused_0 * 512 + i2_i3_fused_1) % 196)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(input_tile_local[ci, p, r_a, r_b])
T.writes(data_pack_local[eps, nu, ci, p])
T.block_attr({"schedule_rule":"conv2d_nchw_winograd_data_pack"})
with T.init():
data_pack_local[eps, nu, ci, p] = T.float32(0)
data_pack_local[eps, nu, ci, p] = data_pack_local[eps, nu, ci, p] + input_tile_local[ci, p, r_a, r_b] * T.Select(r_a % 6 == 5 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 5 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 0, T.float32(0), T.Select(r_a % 6 == 4 and eps % 6 == 5, T.float32(1.5), T.Select(r_a % 6 == 4 and eps % 6 == 4, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 3, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 2, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 0, T.float32(1), T.Select(r_a % 6 == 3 and eps % 6 == 5, T.float32(-2), T.Select(r_a % 6 == 3 and eps % 6 == 4, T.float32(-0.5), T.Select(r_a % 6 == 3 and eps % 6 == 3, T.float32(2), T.Select(r_a % 6 == 3 and eps % 6 == 2, T.float32(2.5), T.Select(r_a % 6 == 3 and eps % 6 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and eps % 6 == 0, T.float32(1.5), T.Select(r_a % 6 == 2 and eps % 6 == 5, T.float32(-1.5), T.Select(r_a % 6 == 2 and eps % 6 == 4, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 3, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 2, T.float32(0.5), T.Select(r_a % 6 == 2 and eps % 6 == 1, T.float32(-2.5), T.Select(r_a % 6 == 2 and eps % 6 == 0, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 4, T.float32(0.5), T.Select(r_a % 6 == 1 and eps % 6 == 3, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 2, T.float32(-1), T.Select(r_a % 6 == 1 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 0, T.float32(-1.5), T.Select(r_a % 6 == 0 and eps % 6 == 5, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 5 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 0, T.float32(0), T.Select(r_b % 6 == 4 and nu % 6 == 5, T.float32(1.5), T.Select(r_b % 6 == 4 and nu % 6 == 4, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 3, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 2, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 0, T.float32(1), T.Select(r_b % 6 == 3 and nu % 6 == 5, T.float32(-2), T.Select(r_b % 6 == 3 and nu % 6 == 4, T.float32(-0.5), T.Select(r_b % 6 == 3 and nu % 6 == 3, T.float32(2), T.Select(r_b % 6 == 3 and nu % 6 == 2, T.float32(2.5), T.Select(r_b % 6 == 3 and nu % 6 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and nu % 6 == 0, T.float32(1.5), T.Select(r_b % 6 == 2 and nu % 6 == 5, T.float32(-1.5), T.Select(r_b % 6 == 2 and nu % 6 == 4, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 3, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 2, T.float32(0.5), T.Select(r_b % 6 == 2 and nu % 6 == 1, T.float32(-2.5), T.Select(r_b % 6 == 2 and nu % 6 == 0, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 4, T.float32(0.5), T.Select(r_b % 6 == 1 and nu % 6 == 3, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 2, T.float32(-1), T.Select(r_b % 6 == 1 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 0, T.float32(-1.5), T.Select(r_b % 6 == 0 and nu % 6 == 5, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
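# As in the NHWC variant, the two Select chains inline the data-pack transform
# matrix B of Winograd F(4x4, 3x3): the first factor is indexed by (r_a, eps),
# the second by (r_b, nu). Collected into a dense matrix (NumPy assumed,
# standalone), with a check that the scalar Select update above is just
# B.T @ d @ B applied per (ci, p) tile:
import numpy as np

B = np.array([
    [ 1.0,  0.0,  0.0,  0.0,  0.0,  0.0],  # r == 0
    [-1.5,  1.0, -1.0, -2.0,  0.5,  1.0],  # r == 1
    [-2.0, -2.5,  0.5, -1.0, -1.0, -1.5],  # r == 2
    [ 1.5,  0.5,  2.5,  2.0, -0.5, -2.0],  # r == 3
    [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.5],  # r == 4
    [ 0.0,  0.0,  0.0,  0.0,  0.0,  1.0],  # r == 5
])
d = np.random.rand(6, 6)
packed = np.einsum("ab,ae,bn->en", d, B, B)  # mirrors the Select-chain update
assert np.allclose(packed, B.T @ d @ B)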