27.000000010748916,
0.0,
0.0,
0.0,
0.0,
0.0,
28.000000005374456,
28.000000005374456,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[16:27],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[27:38],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[38:49],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[49:57],
desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[57:75],
desired=[
1.0,
0.0,
0.0,
29.00000000268723,
9.002815015607053,
23.00000017198264,
3.169925001442312,
1.0,
0.0,
0.0,
5.044394119358453,
7.651051691178929,
5.044394119358453,
24.000000085991324,
4.087462841250339,
18.00000550343433,
0.32192809488736235,
1.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[75:93],
desired=[
0.0,
0.0,
1.0,
29.00000000268723,
11.000704269011246,
23.00000017198264,
5.044394119358453,
1.0,
0.0,
0.0,
4.087462841250339,
7.05528243550119,
1.584962500721156,
            28.000000005374456,
10.001408194392809,
22.00000034396526,
4.087462841250339,
1.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[93:111],
desired=[
1.0,
0.0,
0.0,
29.00000000268723,
12.0003521774803,
19.00000275171979,
9.002815015607053,
1.0,
0.0,
0.0,
1.0,
3.700439718141092,
4.087462841250339,
25.000000042995662,
8.005624549193879,
15.000044026886828,
5.044394119358453,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[111:129],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[129:147],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[147:157],
desired=[
11.98533,
12.977811,
13.562714,
13.977722,
14.299632,
14.562654,
14.785038,
14.977677,
15.147597,
15.299596,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[157:164],
        desired=[
11.000704269011246,
18.00000550343433,
9.002815015607053,
18.00000550343433,
27.000000010748916,
3.0,
10.001408,
],
rtol=1e-5,
atol=1e-5,
)
f = feature[3]
assert_allclose(
actual=f[0:16],
desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[16:27],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[27:38],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[38:49],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[49:57],
desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[57:75],
desired=[
0.0,
1.0,
0.0,
20.000001375860553,
20.000001375860553,
14.000088052430122,
14.000088052430122,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
21.000000687930438,
21.000000687930438,
15.000044026886828,
15.000044026886828,
1.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[75:93],
desired=[
1.0,
0.0,
0.0,
20.000001375860553,
11.000704269011246,
14.000088052430122,
5.044394119358453,
1.0,
0.0,
0.0,
            9.002815015607053,
12.0003521774803,
4.087462841250339,
16.00002201361136,
7.011227255423254,
10.001408194392809,
1.584962500721156,
1.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[93:111],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[111:129],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[129:147],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[147:157],
desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[157:164],
desired=[
20.000001375860553,
18.00000550343433,
1.0,
18.00000550343433,
18.00000550343433,
2.584962500721156,
10.001408,
],
rtol=1e-5,
atol=1e-5,
)
def test_cpu_layout_transform():
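    # Smoke test: per-store feature extraction should run end-to-end on the
    # LayoutTransform workload without raising.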
extractor = ms.feature_extractor.PerStoreFeature()
(feature,) = extractor.extract_from(
_make_context(tvm.target.Target("llvm")),
candidates=[_make_candidate(lambda: tir.Schedule(LayoutTransform))],
)
@T.prim_func
def negative_extent(A: T.Buffer[(1,), "float32"]):
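    # range(0, -1) gives the loop a negative extent, so the store below never executes.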
for j in range(0, -1):
A[j] = A[j] + 1.0
def test_negative_extent():
extractor = ms.feature_extractor.PerStoreFeature()
(features,) = extractor.extract_from(
_make_context(tvm.target.Target("llvm")),
candidates=[_make_candidate(lambda: tir.Schedule(negative_extent))],
)
named_features = dict(zip(_feature_names(), list(features.numpy()[0, :])))
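    # Since the store never runs, buffer B0 should touch zero unique bytes.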
assert named_features["B0.unique_bytes"] == 0
if __name__ == "__main__":
    tvm.testing.main()

import re
import tempfile
from typing import List

import pytest
import tvm
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.tir.schedule import Schedule
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def test_meta_schedule_measure_callback():
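    # A callback that validates the exact candidates, builder results, and
    # runner results handed to it by the task scheduler.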
    @ms.derived_object
class FancyMeasureCallback(ms.measure_callback.PyMeasureCallback):
def apply(
self,
task_scheduler: ms.task_scheduler.TaskScheduler,
task_id: int,
measure_candidates: List[ms.MeasureCandidate],
builder_results: List[ms.builder.BuilderResult],
runner_results: List[ms.runner.RunnerResult],
) -> None:
assert len(measure_candidates) == 1
tvm.ir.assert_structural_equal(measure_candidates[0].sch.mod, Matmul)
assert (
len(builder_results) == 1
and builder_results[0].error_msg is None
and builder_results[0].artifact_path == "test_build"
)
assert (
len(runner_results) == 1
and runner_results[0].error_msg is None
and len(runner_results[0].run_secs) == 2
)
measure_callback = FancyMeasureCallback()
measure_callback.apply(
ms.task_scheduler.RoundRobin(),
0,
[ms.MeasureCandidate(Schedule(Matmul), None)],
[ms.builder.BuilderResult("test_build", None)],
[ms.runner.RunnerResult([1.0, 2.1], None)],
)
def test_meta_schedule_measure_callback_fail():
    @ms.derived_object
class FailingMeasureCallback(ms.measure_callback.PyMeasureCallback):
def apply(
self,
task_scheduler: ms.task_scheduler.TaskScheduler,
task_id: int,
measure_candidates: List[ms.MeasureCandidate],
builder_results: List[ms.builder.BuilderResult],
runner_results: List[ms.runner.RunnerResult],
) -> None:
raise ValueError("test")
measure_callback = FailingMeasureCallback()
with pytest.raises(ValueError, match="test"):
measure_callback.apply(
ms.task_scheduler.RoundRobin(),
0,
[ms.MeasureCandidate(Schedule(Matmul), None)],
[ms.builder.BuilderResult("test_build", None)],
[ms.runner.RunnerResult([1.0, 2.1], None)],
)
def test_meta_schedule_measure_callback_as_string():
    @ms.derived_object
class NotSoFancyMeasureCallback(ms.measure_callback.PyMeasureCallback):
def apply(
self,
task_scheduler: ms.task_scheduler.TaskScheduler,
task_id: int,
measure_candidates: List[ms.MeasureCandidate],
builder_results: List[ms.builder.BuilderResult],
runner_results: List[ms.runner.RunnerResult],
) -> None:
pass
measure_callback = NotSoFancyMeasureCallback()
pattern = re.compile(r"meta_schedule.NotSoFancyMeasureCallback\(0x[a-f|0-9]*\)")
assert pattern.match(str(measure_callback))
def test_meta_schedule_measure_callback_update_cost_model_with_zero():
    @ms.derived_object
class AllZeroRunnerFuture(ms.runner.PyRunnerFuture):
def done(self) -> bool:
return True
def result(self) -> ms.runner.RunnerResult:
return ms.runner.RunnerResult([0.0, 0.0], None)
    @ms.derived_object
class AllZeroRunner(ms.runner.PyRunner):
def run(self, runner_inputs: List[ms.runner.RunnerInput]) -> List[ms.runner.RunnerResult]:
return [AllZeroRunnerFuture() for _ in runner_inputs]
with tempfile.TemporaryDirectory() as work_dir:
ms.tune_tir(
mod=Matmul,
target="llvm -num-cores=1",
work_dir=work_dir,
max_trials_global=10,
runner=AllZeroRunner(),
measure_callbacks=[ms.measure_callback.UpdateCostModel()],
)
if __name__ == "__main__":
test_meta_schedule_measure_callback()
test_meta_schedule_measure_callback_fail()
test_meta_schedule_measure_callback_as_string()
    test_meta_schedule_measure_callback_update_cost_model_with_zero()

import numpy as np
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay
def get_dense_dense(data_shape, weight_shape):
def multi_dense():
p_data = relay.var("p_data", shape=data_shape, dtype="float32")
p_weight1 = relay.var("p_weight1", shape=weight_shape, dtype="float32")
p_weight2 = relay.var("p_weight2", shape=weight_shape, dtype="float32")
dense1 = relay.nn.dense(p_data, p_weight1)
dense2 = relay.nn.dense(dense1, p_weight2)
f = relay.Function([p_data, p_weight1, p_weight2], dense2)
f = f.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
return f
data = relay.var("data", shape=data_shape, dtype="float32")
weight1 = relay.var("weight1", shape=weight_shape, dtype="float32")
weight2 = relay.var("weight2", shape=weight_shape, dtype="float32")
out = relay.Call(multi_dense(), [data, weight1, weight2])
return relay.Function([data, weight1, weight2], out)
def get_ref(data_np, weight1_np, weight2_np):
dense1 = np.dot(data_np, np.transpose(weight1_np))
return np.dot(dense1, np.transpose(weight2_np))
def schedule_dense_dense(sch):
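    # Intentionally a no-op schedule: it only fetches block and loop handles
    # without transforming them.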
dense1 = sch.get_block("T_matmul_NT")
dense2 = sch.get_block("T_matmul_NT_1")
_y1, _x1, _k1 = sch.get_loops(dense1)
_y2, _x2, _k2 = sch.get_loops(dense2)
def test_dense_dense():
M, N, K = 128, 128, 128
data_shape = (M, K)
weight_shape = (N, K)
relay_mod = tvm.IRModule.from_expr(get_dense_dense(data_shape, weight_shape))
data_np = np.random.randn(*data_shape).astype("float32")
weight1_np = np.random.randn(*weight_shape).astype("float32")
weight2_np = np.random.randn(*weight_shape).astype("float32")
target = "llvm"
params = {"weight1": weight1_np, "weight2": weight2_np}
def schedule_fn(sch):
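        # Return True only for the fused dense-dense task to mark it as scheduled.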
if "nn_dense_nn_dense" in sch.mod.attrs["task_name"]:
schedule_dense_dense(sch)
return True
return False
with ms.database.ScheduleFnDatabase(schedule_fn):
with tvm.transform.PassContext(
opt_level=3,
            config={"relay.backend.use_meta_schedule": True},
):
lib = relay.build(relay_mod, target=target, params=params)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = get_ref(data_np, weight1_np, weight2_np)
tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
    test_dense_dense()

from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
@T.prim_func
def add(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, [2048, 2048, 2048], dtype="float32")
B = T.match_buffer(b, [2048, 2048, 2048], dtype="float32")
A_cached = T.alloc_buffer([2048, 2048, 2048], dtype="float32")
for i, j, k in T.grid(2048, 2048, 2048):
with T.block("move"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes([A_cached[vi, vj, vk]])
A_cached[vi, vj, vk] = A[vi, vj, vk]
for i0, j0, i1, j1, k0, i2, j2, k1 in T.grid(128, 64, 4, 4, 64, 4, 8, 32):
with T.block("add"):
vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2)
vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + j2)
vk = T.axis.spatial(2048, k0 * 32 + k1)
T.reads([A_cached[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A_cached[vi, vj, vk] + T.float32(1)
def _sch(decision: int) -> Schedule:
sch = Schedule(add, debug_mask="all")
b0 = sch.get_block(name="move", func_name="main")
l1 = sch.sample_compute_location(block=b0, decision=decision)
sch.compute_at(block=b0, loop=l1, preserve_unit_loops=True)
return sch
def _make_mutator(target: Target) -> ms.Mutator:
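    # Build a TuneContext solely to obtain a mutator instance initialized for the target.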
ctx = ms.TuneContext(
mod=add,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[],
mutator_probs={ms.mutator.MutateComputeLocation(): 1.0},
),
)
return list(ctx.space_generator.mutator_probs.keys())[0]
def test_mutate_compute_location_add():
mutator = _make_mutator(
target=Target("llvm"),
)
sch = _sch(decision=4)
results = set()
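    # MutateComputeLocation must pick a compute location different from the
    # original decision (4); the test expects all 9 alternative candidates to appear.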
for _ in range(100):
trace = mutator.apply(sch.trace)
decision = trace.decisions[trace.insts[-2]]
assert not decision == 4
results.add(decision)
assert len(results) == 9
if __name__ == "__main__":
    test_mutate_compute_location_add()

from typing import List

from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [512, 512])
B = T.match_buffer(b, [512, 512])
C = T.match_buffer(c, [512, 512])
for i, j, k in T.grid(512, 512, 512):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def _sch(decisions: List[List[int]], ann_val: int) -> Schedule:
sch = Schedule(matmul, debug_mask="all")
d0, d1, d2 = decisions
b0 = sch.get_block(name="C", func_name="main")
root = sch.get_block(name="root", func_name="main")
sch.get_consumers(block=b0)
b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
l2, l3, l4 = sch.get_loops(block=b0)
v5, v6, v7, v8 = sch.sample_perfect_tile(
loop=l2,
n=4,
max_innermost_factor=64,
decision=d0,
)
l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8])
v13, v14, v15, v16 = sch.sample_perfect_tile(
loop=l3,
n=4,
max_innermost_factor=64,
decision=d1,
)
l17, l18, l19, l20 = sch.split(loop=l3, factors=[v13, v14, v15, v16])
v21, v22 = sch.sample_perfect_tile(
loop=l4,
n=2,
max_innermost_factor=64,
decision=d2,
)
l23, l24 = sch.split(loop=l4, factors=[v21, v22])
sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
sch.reverse_compute_at(block=b1, loop=l18, preserve_unit_loops=True)
sch.annotate(block_or_loop=root, ann_key="meta_schedule.parallel", ann_val=ann_val)
return sch
def _make_mutator(target: Target, max_jobs_per_core: int) -> ms.Mutator:
ctx = ms.TuneContext(
mod=matmul,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[],
            mutator_probs={ms.mutator.MutateParallel(max_jobs_per_core): 1.0},
),
)
return list(ctx.space_generator.mutator_probs.keys())[0]
def test_mutate_parallel_matmul():
mutator = _make_mutator(
target=Target("llvm --num-cores=16"),
max_jobs_per_core=256,
)
sch = _sch(
decisions=[
[4, 32, 4, 1],
[8, 4, 8, 2],
[512, 1],
],
ann_val=64,
)
results = set()
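    # MutateParallel rewrites the meta_schedule.parallel annotation; with 16
    # cores and 256 jobs per core, the sampled values should be exactly {4, 32, 4096}.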
for _ in range(100):
trace = mutator.apply(sch.trace)
ann_val = int(trace.insts[-1].inputs[1])
results.add(ann_val)
if len(results) == 3:
break
assert len(results) == 3
assert results == {4, 32, 4096}
if __name__ == "__main__":
    test_mutate_parallel_matmul()

from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
@T.prim_func
def element_wise(var_A: T.handle, var_B: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
for i, j in T.grid(512, 512):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + 1.0
def _sch() -> Schedule:
sch = Schedule(element_wise, debug_mask="all")
b0 = sch.get_block(name="C", func_name="main")
l1, l2 = sch.get_loops(block=b0)
l3 = sch.fuse(l1, l2)
v4 = sch.sample_categorical(
candidates=[32, 64, 128, 256, 512, 1024],
probs=[
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
],
decision=3,
)
l5, l6 = sch.split(loop=l3, factors=[None, v4])
sch.bind(loop=l5, thread_axis="blockIdx.x")
sch.bind(loop=l6, thread_axis="threadIdx.x")
return sch
def _make_mutator(target: Target) -> ms.Mutator:
ctx = ms.TuneContext(
mod=element_wise,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[],
mutator_probs={ms.mutator.MutateThreadBinding(): 1.0},
),
)
return list(ctx.space_generator.mutator_probs.keys())[0]
def test_mutate_thread_binding():
mutator = _make_mutator(target=Target("cuda"))
sch = _sch()
results = set()
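    # MutateThreadBinding re-samples the categorical decision (originally 3),
    # so the five remaining candidate indices {0, 1, 2, 4, 5} should appear.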
for _ in range(100):
trace = mutator.apply(sch.trace)
decision = trace.decisions[trace.insts[-4]]
results.add(decision)
if len(results) == 5:
break
assert len(results) == 5
assert results == {0, 1, 2, 4, 5}
if __name__ == "__main__":
    test_mutate_thread_binding()

import operator
from functools import reduce
from typing import List

from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [512, 512])
B = T.match_buffer(b, [512, 512])
C = T.match_buffer(c, [512, 512])
for i, j, k in T.grid(512, 512, 512):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def _sch(decisions: List[List[int]]) -> Schedule:
sch = Schedule(matmul, debug_mask="all")
(d0,) = decisions
b0 = sch.get_block(name="C", func_name="main")
sch.get_consumers(block=b0)
b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
l2, l3, l4 = sch.get_loops(block=b0)
v5, v6, v7, v8 = sch.sample_perfect_tile(
loop=l2,
n=4,
max_innermost_factor=64,
decision=d0,
)
l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8])
l17, l18, l19, l20 = sch.split(loop=l3, factors=[8, 4, 8, 2])
l23, l24 = sch.split(loop=l4, factors=[512, 1])
sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
sch.reverse_compute_at(block=b1, loop=l18, preserve_unit_loops=True)
return sch
def _make_mutator(target: Target) -> ms.Mutator:
ctx = ms.TuneContext(
mod=matmul,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[],
mutator_probs={ms.mutator.MutateTileSize(): 1.0},
),
)
return list(ctx.space_generator.mutator_probs.keys())[0]
def test_mutate_tile_size_matmul():
mutator = _make_mutator(
target=Target("llvm --num-cores=16"),
)
results = {}
sch = _sch(decisions=[[4, 32, 4, 1]])
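    # Each mutation re-samples the first perfect-tile split; the factors must
    # still multiply to the loop extent 512.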
for _ in range(1000):
trace = mutator.apply(sch.trace)
assert trace.insts[4].kind.name == "SamplePerfectTile"
decision = trace.decisions[trace.insts[4]]
        decision = [int(x) for x in decision]
results[str(decision)] = decision
assert reduce(operator.mul, decision, 1) == 512
assert len(results) > 15
if __name__ == "__main__":
    test_mutate_tile_size_matmul()

from typing import List

from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [512, 512])
B = T.match_buffer(b, [512, 512])
C = T.match_buffer(c, [512, 512])
for i, j, k in T.grid(512, 512, 512):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def _sch(decisions: List[List[int]]) -> Schedule:
sch = Schedule(matmul, debug_mask="all")
d0, d1, d2 = decisions
b0 = sch.get_block(name="C", func_name="main")
root = sch.get_block(name="root", func_name="main")
sch.get_consumers(block=b0)
b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
l2, l3, l4 = sch.get_loops(block=b0)
v5, v6, v7, v8 = sch.sample_perfect_tile(
loop=l2,
n=4,
max_innermost_factor=64,
decision=d0,
)
l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8])
v13, v14, v15, v16 = sch.sample_perfect_tile(
loop=l3,
n=4,
max_innermost_factor=64,
decision=d1,
)
l17, l18, l19, l20 = sch.split(loop=l3, factors=[v13, v14, v15, v16])
v21, v22 = sch.sample_perfect_tile(
loop=l4,
n=2,
max_innermost_factor=64,
decision=d2,
)
l23, l24 = sch.split(loop=l4, factors=[v21, v22])
sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
sch.reverse_compute_at(block=b1, loop=l18, preserve_unit_loops=True)
v57 = sch.sample_categorical(
candidates=[0, 16, 64, 512],
probs=[0.25, 0.25, 0.25, 0.25],
decision=0,
)
sch.annotate(block_or_loop=root, ann_key="meta_schedule.unroll_explicit", ann_val=v57)
return sch
def _make_mutator(target: Target) -> ms.Mutator:
ctx = ms.TuneContext(
mod=matmul,
target=target,
space_generator=ms.space_generator.PostOrderApply(
            sch_rules=[],
postprocs=[],
mutator_probs={ms.mutator.MutateUnroll(): 1.0},
),
)
return list(ctx.space_generator.mutator_probs.keys())[0]
def test_mutate_unroll_matmul():
mutator = _make_mutator(target=Target("llvm --num-cores=16"))
sch = _sch(
decisions=[
[4, 32, 4, 1],
[8, 4, 8, 2],
[512, 1],
],
)
results = set()
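    # MutateUnroll re-samples the unroll decision (originally 0), so the other
    # three categorical indices {1, 2, 3} should all appear.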
for _ in range(100):
trace = mutator.apply(sch.trace)
decision = trace.decisions[trace.insts[-2]]
results.add(decision)
if len(results) == 3:
break
assert len(results) == 3
assert results == {1, 2, 3}
if __name__ == "__main__":
    test_mutate_unroll_matmul()

import math
import sys
from typing import List

import pytest
import tvm
import tvm.testing
from tvm._ffi import register_func
from tvm.error import TVMError
from tvm.meta_schedule import TuneContext
from tvm.meta_schedule.schedule_rule import PyScheduleRule
from tvm.meta_schedule.space_generator import PostOrderApply
from tvm.meta_schedule.utils import derived_object
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir.schedule import BlockRV, Schedule
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class DuplicateMatmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class TrinityMatmul:
@T.prim_func
def main(a: T.handle, d: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.alloc_buffer((1024, 1024), "float32")
C = T.alloc_buffer((1024, 1024), "float32")
D = T.match_buffer(d, (1024, 1024), "float32")
for i, j in T.grid(1024, 1024):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(1024, 1024):
            with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 3.0
for i, j in T.grid(1024, 1024):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = C[vi, vj] * 5.0
@tvm.script.ir_module
class TrinityMatmulProcessedForReference:
@T.prim_func
def main(a: T.handle, d: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, [1024, 1024], dtype="float32")
D = T.match_buffer(d, [1024, 1024], dtype="float32")
B = T.alloc_buffer([1024, 1024], dtype="float32")
for i0_0, i1_0, i0_1, i1_1 in T.grid(16, 64, 64, 16):
with T.block("A"):
vi = T.axis.S(1024, i0_0 * 64 + i0_1)
vj = T.axis.S(1024, i1_0 * 16 + i1_1)
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i0_0, i1_0, i0_1, i1_1 in T.grid(16, 64, 64, 16):
with T.block("C"):
vi = T.axis.S(1024, i0_0 * 64 + i0_1)
vj = T.axis.S(1024, i1_0 * 16 + i1_1)
T.reads([B[vi, vj]])
T.writes([D[vi, vj]])
D[vi, vj] = (B[vi, vj] + T.float32(3)) * T.float32(5)
def _is_root(sch: Schedule, block: BlockRV) -> bool:
return sch.get_sref(block).parent is None
def _check_correct(schedule: Schedule):
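    # Every sampled perfect-tile decision must multiply back to the loop extent 1024.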
trace = schedule.trace
for inst in trace.decisions:
assert math.prod(trace.decisions[inst]) == 1024
@derived_object
class WowSoFancyScheduleRule(PyScheduleRule):
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
pass
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
if _is_root(sch, block):
return [sch]
new_sch = sch.copy()
i, j, k = new_sch.get_loops(block=block)
i_0, i_1, i_2, i_3 = new_sch.split(loop=i, factors=[2, 4, 64, 2])
j_0, j_1, j_2, j_3 = new_sch.split(loop=j, factors=[4, 64, 2, 2])
k_0, k_1 = new_sch.split(loop=k, factors=[32, 32])
new_sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3)
return [new_sch]
@derived_object
class DoubleScheduleRule(PyScheduleRule):
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
pass
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
if _is_root(sch, block):
return [sch]
new_sch = sch.copy()
i, j, k = new_sch.get_loops(block=block)
i_0, i_1, i_2, i_3 = new_sch.split(loop=i, factors=[4, 64, 2, 2])
j_0, j_1, j_2, j_3 = new_sch.split(loop=j, factors=[2, 4, 64, 2])
k_0, k_1 = new_sch.split(loop=k, factors=[32, 32])
new_sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3)
result = [new_sch]
new_sch = sch.copy()
i, j, k = new_sch.get_loops(block=block)
i_0, i_1, i_2, i_3 = new_sch.split(loop=i, factors=[4, 64, 2, 2])
j_0, j_1, j_2, j_3 = new_sch.split(loop=j, factors=[2, 4, 64, 2])
k_0, k_1 = new_sch.split(loop=k, factors=[32, 32])
new_sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3)
result.append(new_sch)
return result
@derived_object
class TrinityDoubleRule(PyScheduleRule):
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
pass
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
if _is_root(sch, block):
return [sch]
new_sch = sch.copy()
i, j = new_sch.get_loops(block=block)
i_0, i_1 = new_sch.split(loop=i, factors=[16, 64])
j_0, j_1 = new_sch.split(loop=j, factors=[64, 16])
new_sch.reorder(i_0, j_0, i_1, j_1)
result = [new_sch]
new_sch = sch.copy()
i, j = new_sch.get_loops(block=block)
i_0, i_1 = new_sch.split(loop=i, factors=[2, 512])
j_0, j_1 = new_sch.split(loop=j, factors=[2, 512])
new_sch.reorder(i_0, j_0, i_1, j_1)
result.append(new_sch)
return result
@derived_object
class ReorderScheduleRule(PyScheduleRule):
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
pass
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
if _is_root(sch, block):
return [sch]
new_sch = sch.copy()
i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3 = new_sch.get_loops(block=block)
new_sch.reorder(i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3, i_0, j_0)
result = [new_sch]
new_sch = sch.copy()
i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3 = new_sch.get_loops(block=block)
new_sch.reorder(i_1, j_3, i_0, j_0, j_1, k_0, i_2, j_2, k_1, i_3)
result.append(new_sch)
return result
def test_meta_schedule_post_order_apply():
mod = Matmul
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Test Task",
space_generator=PostOrderApply(
sch_rules=[WowSoFancyScheduleRule()],
postprocs=[],
mutator_probs={},
),
)
post_order_apply = context.space_generator
schs = post_order_apply.generate_design_space(mod)
assert len(schs) == 1
assert not tvm.ir.structural_equal(schs[0].mod, mod)
_check_correct(schs[0])
def test_meta_schedule_post_order_apply_double():
mod = Matmul
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Double Rules Task",
space_generator=PostOrderApply(
sch_rules=[DoubleScheduleRule()],
postprocs=[],
mutator_probs={},
),
)
post_order_apply = context.space_generator
schs = post_order_apply.generate_design_space(mod)
assert len(schs) == 2
for sch in schs:
assert not tvm.ir.structural_equal(sch.mod, mod)
_check_correct(sch)
def test_meta_schedule_post_order_apply_multiple():
mod = Matmul
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Double Rules Task",
        space_generator=PostOrderApply(
sch_rules=[DoubleScheduleRule(), ReorderScheduleRule()],
postprocs=[],
mutator_probs={},
),
)
post_order_apply = context.space_generator
schs = post_order_apply.generate_design_space(mod)
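    # DoubleScheduleRule and ReorderScheduleRule each fork the space in two: 2 * 2 = 4.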
assert len(schs) == 4
for sch in schs:
assert not tvm.ir.structural_equal(sch.mod, mod)
_check_correct(sch)
def test_meta_schedule_post_order_apply_duplicate_matmul():
mod = DuplicateMatmul
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Duplicate Matmul Task",
space_generator=PostOrderApply(
sch_rules=[WowSoFancyScheduleRule()],
postprocs=[],
mutator_probs={},
),
)
post_order_apply = context.space_generator
with pytest.raises(
TVMError,
match=r".*TVMError: Check failed: \(block_names_.count\(block->name_hint\) == 0\)"
r" is false: Duplicated block name matmul in function main not supported!",
):
post_order_apply.generate_design_space(mod)
def test_meta_schedule_post_order_apply_remove_block():
    @derived_object
class RemoveBlock(PyScheduleRule):
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
pass
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
if _is_root(sch, block):
return [sch]
sch = sch.copy()
if sch.get(block).name_hint == "B":
sch.compute_inline(block)
return [sch]
def correct_trace(a, b, c, d):
return "\n".join(
[
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="A", func_name="main")',
' b1 = sch.get_block(name="B", func_name="main")',
' b2 = sch.get_block(name="C", func_name="main")',
" sch.compute_inline(block=b1)",
" l3, l4 = sch.get_loops(block=b2)",
" l5, l6 = sch.split(loop=l3, factors=" + str(a) + ", preserve_unit_iters=True)",
" l7, l8 = sch.split(loop=l4, factors=" + str(b) + ", preserve_unit_iters=True)",
" sch.reorder(l5, l7, l6, l8)",
" l9, l10 = sch.get_loops(block=b0)",
" l11, l12 = sch.split(loop=l9, factors=" + str(c) + ", preserve_unit_iters=True)",
" l13, l14 = sch.split(loop=l10, factors="
+ str(d)
+ ", preserve_unit_iters=True)",
" sch.reorder(l11, l13, l12, l14)",
]
)
mod = TrinityMatmul
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Remove Block Task",
space_generator=PostOrderApply(
sch_rules=[RemoveBlock(), TrinityDoubleRule()],
postprocs=[],
mutator_probs={},
),
)
post_order_apply = context.space_generator
schs = post_order_apply.generate_design_space(mod)
assert len(schs) == 4
for sch in schs:
with pytest.raises(
            tvm.tir.schedule.schedule.ScheduleError,
match="ScheduleError: An error occurred in the schedule primitive 'get-block'.",
):
sch.get_block("B", "main")
sch_trace = sch.trace.simplified(True)
assert (
str(sch_trace) == correct_trace([16, 64], [64, 16], [2, 512], [2, 512])
or str(sch_trace) == correct_trace([2, 512], [2, 512], [2, 512], [2, 512])
or str(sch_trace) == correct_trace([16, 64], [64, 16], [16, 64], [64, 16])
or str(sch_trace) == correct_trace([2, 512], [2, 512], [16, 64], [64, 16])
)
def test_target_blocks_search_space():
def filter_fn(block, target_names) -> bool:
return block.name_hint in target_names
def _get_sch(filter_fn):
mod = TrinityMatmul
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Custom Search Space Task",
space_generator=PostOrderApply(
f_block_filter=filter_fn,
sch_rules=[TrinityDoubleRule()],
postprocs=[],
mutator_probs={},
),
)
post_order_apply = context.space_generator
schs = post_order_apply.generate_design_space(mod)
return schs
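    # TrinityDoubleRule emits two schedules per matched block, so filtering to
    # n blocks yields 2**n design spaces.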
schs = _get_sch(None)
assert len(schs) == 8
schs = _get_sch(lambda block: filter_fn(block, ["B"]))
assert len(schs) == 2
schs = _get_sch(lambda block: filter_fn(block, ["A", "C"]))
assert len(schs) == 4
schs = _get_sch(lambda block: filter_fn(block, ["A", "B", "C"]))
assert len(schs) == 8
if __name__ == "__main__":
    tvm.testing.main()

import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
return Target("cuda", host="llvm")
def _create_context(mod, target) -> ms.TuneContext:
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[
ms.postproc.DisallowDynamicLoop(),
],
mutator_probs={},
),
task_name="test",
)
return ctx
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class DynamicLoop:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j in T.grid(1024, 1024):
for k in T.serial(0, i):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def test_postproc_disallow_dynamic_loops():
mod = Matmul
ctx = _create_context(mod, target=_target())
sch = tir.Schedule(mod, debug_mask="all")
assert ctx.space_generator.postprocs[0].apply(sch)
def test_postproc_disallow_dynamic_loops_fail():
mod = DynamicLoop
    ctx = _create_context(mod, target=_target())
sch = tir.Schedule(mod, debug_mask="all")
assert not ctx.space_generator.postprocs[0].apply(sch)
if __name__ == "__main__":
test_postproc_disallow_dynamic_loops()
    test_postproc_disallow_dynamic_loops_fail()

import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import tir
from tvm.meta_schedule.testing import te_workload
from tvm.script import tir as T
from tvm.target import Target
from tvm.te import create_prim_func
def _target() -> Target:
return Target("cuda", host="llvm")
def _create_context(mod, target) -> ms.TuneContext:
ctx = ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=[],
postprocs=[
ms.postproc.RewriteCooperativeFetch(),
],
mutator_probs={},
),
task_name="test",
)
return ctx
@tvm.script.ir_module
class AfterRewrite0:
@T.prim_func
def main(var_A: T.handle, var_B: T.handle, var_C: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
C = T.match_buffer(var_C, [512, 512], dtype="float32")
C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"):
for i2_0 in T.serial(0, 1):
for ax0_ax1_fused_0 in T.serial(0, 32768):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
with T.block("A_shared"):
                                    v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) // 512)
v1 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) % 512)
T.reads([A[v0, v1]])
T.writes([A_shared[v0, v1]])
                                    A_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused_0 in T.serial(0, 1024):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
for ax0_ax1_fused_2 in T.vectorized(0, 2):
with T.block("B_shared"):
                                        v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) // 32)
v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) % 32)
T.reads([B[v0, v1]])
T.writes([B_shared[v0, v1]])
B_shared[v0, v1] = B[v0, v1]
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2):
with T.block("C"):
i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4)
j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4)
k = T.axis.reduce(512, i2_0 * 512 + i2_1 * 32 + i2_2)
T.reads([A_shared[i, k], B_shared[k, j]])
T.writes([C_local[i, j]])
with T.init():
C_local[i, j] = T.float32(0)
C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
for ax0, ax1 in T.grid(32, 4):
with T.block("C_local"):
v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0)
v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1)
T.reads([C_local[v0, v1]])
T.writes([C[v0, v1]])
C[v0, v1] = C_local[v0, v1]
@tvm.script.ir_module
class WarpExecutionAfterRewrite:
@T.prim_func
def main(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.y"):
for i2_0 in T.serial(0, 1):
for ax0_ax1_fused_0 in T.serial(0, 1024):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.y"):
for ax0_ax1_fused_2 in T.thread_binding(
0, 32, thread="threadIdx.x"
):
with T.block("A_shared"):
v0 = T.axis.spatial(
512,
(
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 32
+ ax0_ax1_fused_2
                                            )
                                            // 512,
                                        )
v1 = T.axis.spatial(
512,
(
ax0_ax1_fused_0 * 256
                                                + ax0_ax1_fused_1 * 32
+ ax0_ax1_fused_2
)
% 512,
)
T.reads([A[v0, v1]])
T.writes([A_shared[v0, v1]])
A_shared[v0, v1] = A[v0, v1]
for ax0_ax1_fused_0 in T.serial(0, 32):
for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.y"):
for ax0_ax1_fused_2 in T.thread_binding(
0, 32, thread="threadIdx.x"
):
for ax0_ax1_fused_3 in T.vectorized(0, 2):
with T.block("B_shared"):
v0 = T.axis.spatial(
512,
(
ax0_ax1_fused_0 * 512
+ ax0_ax1_fused_1 * 64
+ ax0_ax1_fused_2 * 2
+ ax0_ax1_fused_3
                                                )
                                                // 32,
                                            )
v1 = T.axis.spatial(
512,
i0_0_i1_0_fused * 32
+ (
ax0_ax1_fused_0 * 512
+ ax0_ax1_fused_1 * 64
+ ax0_ax1_fused_2 * 2
+ ax0_ax1_fused_3
                                                )
% 32,
)
T.reads([B[v0, v1]])
T.writes([B_shared[v0, v1]])
B_shared[v0, v1] = B[v0, v1]
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2):
with T.block("C"):
i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4)
j = T.axis.spatial(
512,
i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4,
)
k = T.axis.reduce(512, i2_0 * 512 + i2_1 * 32 + i2_2)
T.reads([A_shared[i, k], B_shared[k, j]])
T.writes([C_local[i, j]])
T.block_attr({"warp_execution": 1})
with T.init():
C_local[i, j] = T.float32(0)
C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
for ax0, ax1 in T.grid(32, 4):
with T.block("C_local"):
v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0)
v1 = T.axis.spatial(
512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1
)
T.reads([C_local[v0, v1]])
T.writes([C[v0, v1]])
C[v0, v1] = C_local[v0, v1]
def test_rewrite_cooperative_fetch():
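    # Replay a tiled GPU matmul trace that annotates cooperative fetches, then
    # check that the postproc lowers the annotations into AfterRewrite0.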
mod = create_prim_func(te_workload.matmul(n=512, m=512, k=512))
target = _target()
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
b0 = sch.get_block(name="C", func_name="main")
    b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="local")
l2, l3, l4 = sch.get_loops(block=b0)
v5, v6, v7, v8, v9 = sch.sample_perfect_tile(loop=l2, n=5, max_innermost_factor=64, decision=[1, 16, 1, 2, 16])
l10, l11, l12, l13, l14 = sch.split(loop=l2, factors=[v5, v6, v7, v8, v9])
v15, v16, v17, v18, v19 = sch.sample_perfect_tile(loop=l3, n=5, max_innermost_factor=64, decision=[16, 1, 8, 2, 2])
l20, l21, l22, l23, l24 = sch.split(loop=l3, factors=[v15, v16, v17, v18, v19])
v25, v26, v27 = sch.sample_perfect_tile(loop=l4, n=3, max_innermost_factor=64, decision=[1, 16, 32])
l28, l29, l30 = sch.split(loop=l4, factors=[v25, v26, v27])
sch.reorder(l10, l20, l11, l21, l12, l22, l28, l29, l13, l23, l30, l14, l24)
l31 = sch.fuse(l10, l20)
sch.bind(loop=l31, thread_axis="blockIdx.x")
l32 = sch.fuse(l11, l21)
sch.bind(loop=l32, thread_axis="vthread.x")
l33 = sch.fuse(l12, l22)
sch.bind(loop=l33, thread_axis="threadIdx.x")
b34 = sch.cache_read(block=b0, read_buffer_index=0, storage_scope="shared")
sch.compute_at(block=b34, loop=l28, preserve_unit_loops=True)
_, _, _, _, l39, l40 = sch.get_loops(block=b34)
l41 = sch.fuse(l39, l40)
_, v43 = sch.sample_perfect_tile(loop=l41, n=2, max_innermost_factor=4, decision=[262144, 1])
sch.annotate(block_or_loop=b34, ann_key="meta_schedule.cooperative_fetch", ann_val=v43)
b44 = sch.cache_read(block=b0, read_buffer_index=1, storage_scope="shared")
sch.compute_at(block=b44, loop=l28, preserve_unit_loops=True)
_, _, _, _, l49, l50 = sch.get_loops(block=b44)
l51 = sch.fuse(l49, l50)
_, v53 = sch.sample_perfect_tile(loop=l51, n=2, max_innermost_factor=4, decision=[8192, 2])
sch.annotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch", ann_val=v53)
sch.reverse_compute_at(block=b1, loop=l33, preserve_unit_loops=True)
sch.enter_postproc()
assert ctx.space_generator.postprocs[0].apply(sch)
    tvm.ir.assert_structural_equal(sch.mod, AfterRewrite0)
def test_rewrite_warp_execution():
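    # Same trace as above but with warp_execution annotated on the main block,
    # so shared-memory loads get bound over threadIdx.y/threadIdx.x.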
mod = create_prim_func(te_workload.matmul(n=512, m=512, k=512))
target = _target()
ctx = _create_context(mod, target)
sch = tir.Schedule(mod, debug_mask="all")
b0 = sch.get_block(name="C", func_name="main")
b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="local")
l2, l3, l4 = sch.get_loops(block=b0)
sch.annotate(b0, "warp_execution", 1)
v5, v6, v7, v8, v9 = sch.sample_perfect_tile(loop=l2, n=5, max_innermost_factor=64, decision=[1, 16, 1, 2, 16])
l10, l11, l12, l13, l14 = sch.split(loop=l2, factors=[v5, v6, v7, v8, v9])
v15, v16, v17, v18, v19 = sch.sample_perfect_tile(loop=l3, n=5, max_innermost_factor=64, decision=[16, 1, 8, 2, 2])
l20, l21, l22, l23, l24 = sch.split(loop=l3, factors=[v15, v16, v17, v18, v19])
v25, v26, v27 = sch.sample_perfect_tile(loop=l4, n=3, max_innermost_factor=64, decision=[1, 16, 32])
l28, l29, l30 = sch.split(loop=l4, factors=[v25, v26, v27])
sch.reorder(l10, l20, l11, l21, l12, l22, l28, l29, l13, l23, l30, l14, l24)
l31 = sch.fuse(l10, l20)
sch.bind(loop=l31, thread_axis="blockIdx.x")
l32 = sch.fuse(l11, l21)
sch.bind(loop=l32, thread_axis="vthread.x")
l33 = sch.fuse(l12, l22)
sch.bind(loop=l33, thread_axis="threadIdx.y")
b34 = sch.cache_read(block=b0, read_buffer_index=0, storage_scope="shared")
sch.compute_at(block=b34, loop=l28, preserve_unit_loops=True)
_, _, _, _, l39, l40 = sch.get_loops(block=b34)
l41 = sch.fuse(l39, l40)
_, v43 = sch.sample_perfect_tile(loop=l41, n=2, max_innermost_factor=4, decision=[262144, 1])
sch.annotate(block_or_loop=b34, ann_key="meta_schedule.cooperative_fetch", ann_val=v43)
b44 = sch.cache_read(block=b0, read_buffer_index=1, storage_scope="shared")
sch.compute_at(block=b44, loop=l28, preserve_unit_loops=True)
_, _, _, _, l49, l50 = sch.get_loops(block=b44)
l51 = sch.fuse(l49, l50)
    _, v53 = sch.sample_perfect_tile(loop=l51, n=2, max_innermost_factor=4, decision