text
stringlengths 1
2.05k
|
---|
class TestBuilder(PyBuilder):
    """Builder stub that fails every input with a fixed error message."""

    def build(
        self,
        build_inputs: List[BuilderInput],
    ) -> List[BuilderResult]:
        # One failed result (no artifact, message "error") per input.
        return [BuilderResult(None, "error") for w in build_inputs]


builder = TestBuilder()
builder_inputs = [
    BuilderInput(MatmulModule, Target("llvm")),
    BuilderInput(MatmulReluModule, Target("llvm")),
    BuilderInput(BatchMatmulModule, Target("llvm")),
]
builder_results = builder.build(builder_inputs)
# NOTE(review): this flat driver looks like the tail of a test function whose
# header was lost upstream; it checks the stub reports failure for every input.
assert len(builder_results) == len(builder_inputs)
for result in builder_results:
    artifact_path = result.artifact_path
    error_msg = result.error_msg
    assert artifact_path is None
    assert error_msg == "error"
def test_meta_schedule_error_handle_build_func():
    """An exception raised by a registered build func surfaces as an error result."""

    def initializer():
        @register_func("meta_schedule.builder.test_build")
        def test_build(mod: Module, target: Target, _) -> None:
            raise ValueError("Builder intended Test Error (build func).")

    builder = LocalBuilder(f_build="meta_schedule.builder.test_build", initializer=initializer)
    builder_inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    for res in builder_results:
        # Failure: no artifact, and the error message carries the builder prefix.
        assert res.artifact_path is None
        assert res.error_msg.startswith("LocalBuilder: An exception occurred")
def test_meta_schedule_error_handle_export_func():
    """An exception raised by a registered export func surfaces as an error result."""

    def initializer():
        @register_func("meta_schedule.builder.test_export")
        def test_build(mod: Module) -> str:
            raise ValueError("Builder intended Test Error (export func).")

    builder = LocalBuilder(f_export="meta_schedule.builder.test_export", initializer=initializer)
    builder_inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    # Fix: the assignment was split across two corrupted lines upstream
    # ("builder_" / "results = ..."); rejoined into one statement.
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    for result in builder_results:
        artifact_path = result.artifact_path
        error_msg = result.error_msg
        assert artifact_path is None
        assert error_msg.startswith("LocalBuilder: An exception occurred")
def test_meta_schedule_error_handle_time_out():
    """A build func that outlives timeout_sec is reported as a timeout failure."""

    def initializer():
        @register_func("meta_schedule.builder.test_time_out")
        def timeout_build(mod, target, _):
            time.sleep(2)

    builder = LocalBuilder(
        timeout_sec=1,
        f_build="meta_schedule.builder.test_time_out",
        initializer=initializer,
    )
    builder_inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    for res in builder_results:
        # Timeout: no artifact, dedicated "Timeout" message prefix.
        assert res.artifact_path is None
        assert res.error_msg.startswith("LocalBuilder: Timeout")
def test_meta_schedule_missing_build_func():
    """Constructing a LocalBuilder with an unregistered f_build name raises."""
    with pytest.raises(ValueError):
        LocalBuilder(f_build="wrong-name")
# Standard TVM testing entry point (fix: removed trailing "|" extraction artifact).
if __name__ == "__main__":
    tvm.testing.main()
""" Test Meta Schedule Builder """ |
import sys
from typing |
import List |
import pytest |
import tvm |
import tvm.testing
from tvm |
import relay
from tvm.meta_schedule.arg_info |
import TensorInfo
from tvm.meta_schedule.builder |
import BuilderInput, LocalBuilder
from tvm.meta_schedule.runner |
import EvaluatorConfig, LocalRunner, RunnerInput
from tvm.meta_schedule.testing.custom_builder_runner |
import (
build_relay,
build_relay_with_tensorrt,
run_with_graph_executor,
)
from tvm.meta_schedule.testing.relay_workload |
import get_network
from tvm.relay |
import testing
from tvm.relay.op.contrib |
import tensorrt
from tvm.target |
import Target
from tvm.tir |
import FloatImm
# Skip markers: these tests require the TensorRT codegen / runtime to be built in.
has_tensorrt_codegen = pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.tensorrt", True),
    reason="TensorRT codegen not available",
)
has_tensorrt_runtime = pytest.mark.skipif(
    not tensorrt.is_tensorrt_runtime_enabled(),
    reason="TensorRT runtime not available",
)
def get_conv2d_relu(
    data_shape,
    out_channels,
    kernel_size,
    strides,
    padding,
    dilation,
    groups,
    data_layout,
    kernel_layout,
    dtype,
):
    """Build a relay Function computing relu(conv + conv) for a conv2d output."""
    data = relay.var("data", relay.TensorType(data_shape, dtype))
    weight = relay.var("weight")  # shape inferred from conv2d attrs
    conv = relay.nn.conv2d(
        data=data,
        weight=weight,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        channels=out_channels,
        kernel_size=kernel_size,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    doubled = relay.add(conv, conv)
    activated = relay.nn.relu(doubled)
    return relay.Function(relay.analysis.free_vars(activated), activated)
def verify_meta_schedule_with_tensorrt(
    mod,
    params,
    data_shape,
    use_trt: bool = True,
):
    """Build *mod* (optionally via TensorRT), run it locally, and sanity-check timings.

    Asserts the build produced an artifact, the run produced no error, and every
    measured time is a non-negative float.
    """
    builder = LocalBuilder(
        f_build=build_relay_with_tensorrt if use_trt else build_relay,
        timeout_sec=1000,
    )
    builder_input = BuilderInput(mod, Target("cuda"), params)
    builder_result = builder.build([builder_input])[0]
    assert builder_result.error_msg is None, builder_result.error_msg
    assert builder_result.artifact_path is not None
    runner_input = RunnerInput(
        builder_result.artifact_path,
        device_type="cuda",
        args_info=[TensorInfo("float32", data_shape)],
    )
    runner = LocalRunner(
        evaluator_config=EvaluatorConfig(
            number=5,
            repeat=2,
            min_repeat_ms=0,
            enable_cpu_cache_flush=False,
        ),
        f_run_evaluator=run_with_graph_executor,
    )
    runner_future = runner.run([runner_input])[0]
    runner_result = runner_future.result()
    # Fix: this assert was split across two corrupted lines ("runner_re"/"sult").
    assert runner_result is not None
    assert runner_result.error_msg is None, runner_result.error_msg
    assert runner_result.run_secs is not None
    for result in runner_result.run_secs:
        if isinstance(result, FloatImm):
            result = result.value
        assert isinstance(result, float)
        assert result >= 0.0
@has_tensorrt_codegen
def test_conv2d_relu():
    """End-to-end build/run of a small conv2d+relu workload through TensorRT."""
    data_shape = (1, 1280, 14, 14)
    func = get_conv2d_relu(
        data_shape,
        out_channels=256,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding=(0, 0, 0, 0),
        dilation=(1, 1),
        groups=1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        dtype="float32",
    )
    mod, params = testing.create_workload(func)
    verify_meta_schedule_with_tensorrt(mod, params, data_shape)
@has_tensorrt_codegen
@pytest.mark.parametrize("model_name", ["resnet_50"])
@pytest.mark.parametrize("input_shape", [[1, 3, 224, 224]])
@pytest.mark.parametrize("use_trt", [True, False])
def test_relay_model(model_name: str, input_shape: List[int], use_trt: bool):
    """Build a full relay network, with and without TensorRT offload."""
    mod, params, _ = get_network(model_name, input_shape)
    verify_meta_schedule_with_tensorrt(mod, params, input_shape, use_trt)
# Standard TVM testing entry point (fix: removed trailing "|" extraction artifact).
if __name__ == "__main__":
    tvm.testing.main()
import os |
import re |
import shutil |
import tempfile |
import unittest
from functools |
import partial
from typing |
import List |
import numpy as np |
import tvm |
import tvm.testing
from tvm.meta_schedule.cost_model |
import PyCostModel, RandomModel, XGBModel
from tvm.meta_schedule.cost_model.xgb_model |
import PackSum, _get_custom_call_back
from tvm.meta_schedule.feature_extractor |
import RandomFeatureExtractor
from tvm.meta_schedule.runner |
import RunnerResult
from tvm.meta_schedule.search_strategy |
import MeasureCandidate
from tvm.meta_schedule.tune_context |
import TuneContext
from tvm.meta_schedule.utils |
import derived_object
from tvm.script |
import tir as T
from tvm.tir.schedule.schedule |
import Schedule
# 1024x1024x1024 fp32 matmul in TVMScript; fix: restored the block structure
# that the upstream extraction flattened (TVMScript requires exact indentation).
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def test_meta_schedule_cost_model():
    """A PyCostModel subclass round-trips through save/load/update/predict."""
    # Fix: removed stray "|" artifact after @derived_object.

    @derived_object
    class FancyCostModel(PyCostModel):
        def load(self, path: str) -> None:
            pass

        def save(self, path: str) -> None:
            pass

        def update(
            self,
            context: TuneContext,
            candidates: List[MeasureCandidate],
            results: List[RunnerResult],
        ) -> None:
            pass

        def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
            return np.random.rand(10)

    model = FancyCostModel()
    model.save("fancy_test_location")
    model.load("fancy_test_location")
    model.update(TuneContext(), [], [])
    results = model.predict(
        TuneContext(), [MeasureCandidate(Schedule(mod=Matmul), []) for _ in range(10)]
    )
    assert results.shape == (10,)
def test_meta_schedule_cost_model_as_string():
    """str() of a derived PyCostModel shows its class name and address."""
    # Fix: removed stray "|" artifact after @derived_object.

    @derived_object
    class NotSoFancyCostModel(PyCostModel):
        def load(self, path: str) -> None:
            pass

        def save(self, path: str) -> None:
            pass

        def update(
            self,
            context: TuneContext,
            candidates: List[MeasureCandidate],
            results: List[RunnerResult],
        ) -> None:
            pass

        def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
            return np.random.rand(10)

    cost_model = NotSoFancyCostModel()
    pattern = re.compile(r"meta_schedule.NotSoFancyCostModel\(0x[a-f|0-9]*\)")
    assert pattern.match(str(cost_model))
def test_meta_schedule_random_model():
    """RandomModel predictions have the right length and stay in [0, max_range]."""
    model = RandomModel()
    model.update(TuneContext(), [], [])
    predictions = model.predict(
        TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(10)]
    )
    assert len(predictions) == 10
    assert min(predictions) >= 0 and max(predictions) <= model.max_range
def test_meta_schedule_random_model_reseed():
    """Two RandomModels constructed with the same seed predict identically."""

    def _candidates():
        return [MeasureCandidate(Schedule(Matmul), []) for _ in range(20)]

    res = RandomModel(seed=100).predict(TuneContext(), _candidates())
    new_res = RandomModel(seed=100).predict(TuneContext(), _candidates())
    assert (res == new_res).all()
def test_meta_schedule_random_model_reload():
    """Reloading a saved RandomModel reproduces the same prediction stream."""
    model = RandomModel(seed=25973)
    # Advance the RNG before checkpointing so the saved state is non-trivial.
    model.predict(TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(30)])
    path = os.path.join(tempfile.mkdtemp(), "test_output_meta_schedule_random_model.npy")
    model.save(path)
    before = model.predict(
        TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(70)]
    )
    model.load(path)
    after = model.predict(
        TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(70)]
    )
    shutil.rmtree(os.path.dirname(path))
    assert (before == after).all()
def _dummy_candidate():
    """Return a MeasureCandidate over an unscheduled Matmul with no args info."""
    return MeasureCandidate(Schedule(Matmul), [])
def _dummy_result(num_samples: int = 4, max_run_sec: int = 10):
    """Return a RunnerResult with *num_samples* random positive timings.

    Fix: the second parameter was split across two corrupted lines
    ("max_r" / "un_sec: int = 10") upstream; rejoined here.
    """
    return RunnerResult(list(np.random.rand(num_samples) * max_run_sec + 1e-6), None)
def test_meta_schedule_xgb_model():
    """Smoke-test XGBModel: one update round followed by a predict."""
    model = XGBModel(extractor=RandomFeatureExtractor(), num_warmup_samples=2)
    n_update, n_predict = 10, 100
    model.update(
        TuneContext(),
        [_dummy_candidate() for _ in range(n_update)],
        [_dummy_result() for _ in range(n_update)],
    )
    model.predict(TuneContext(), [_dummy_candidate() for _ in range(n_predict)])
def test_meta_schedule_xgb_model_reload():
    """Save/load of an XGBModel preserves predictions and the training data cache."""
    extractor = RandomFeatureExtractor()
    model = XGBModel(extractor=extractor, num_warmup_samples=10)
    update_sample_count = 20
    predict_sample_count = 30
    model.update(
        TuneContext(),
        [_dummy_candidate() for i in range(update_sample_count)],
        [_dummy_result() for i in range(update_sample_count)],
    )
    model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)])
    with tempfile.NamedTemporaryFile() as path:
        # Pin the extractor RNG state so both predictions see the same features.
        random_state = model.extractor.random_state
        old_data = model.data
        old_data_size = model.data_size
        model.save(path.name)
        res1 = model.predict(
            TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]
        )
        model.extractor.random_state = random_state
        model.load(path.name)
        new_data = model.data
        new_data_size = model.data_size
        res2 = model.predict(
            TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]
        )
    assert (res1 == res2).all()
    assert old_data_size == new_data_size
    assert len(old_data) == len(new_data)
    for (k1, g1), (k2, g2) in zip(old_data.items(), new_data.items()):
        assert k1 == k2
        assert k1 == g1.group_hash
        assert k2 == g2.group_hash
        assert (g1.costs == g2.costs).all()
        # Fix: this assert was split across two corrupted lines ("feature"/"s").
        assert len(g1.features) == len(g2.features)
        for f1, f2 in zip(g1.features, g2.features):
            assert (f1 == f2).all()
def test_meta_schedule_xgb_model_reupdate():
    """XGBModel tolerates repeated update rounds before predicting."""
    model = XGBModel(extractor=RandomFeatureExtractor(), num_warmup_samples=2)
    n_update, n_predict = 60, 100
    for _ in range(3):
        model.update(
            TuneContext(),
            [_dummy_candidate() for _ in range(n_update)],
            [_dummy_result() for _ in range(n_update)],
        )
    model.predict(TuneContext(), [_dummy_candidate() for _ in range(n_predict)])
def xgb_version_check():
    """Return True when the installed xgboost is version 1.6.0 or newer.

    Fix: the function's local imports were split/garbled by extraction;
    restored to two ordinary import statements.
    """
    import xgboost as xgb
    from packaging import version

    return version.parse(xgb.__version__) >= version.parse("1.6.0")
@unittest.skipIf(xgb_version_check(), "test not supported for xgboost version after 1.6.0")
def test_meta_schedule_xgb_model_callback_as_function():
    """Training with the custom callback reproduces the saved model's predictions.

    Fix: the function-local imports were garbled by extraction and line 469
    carried a stray "|" artifact; both restored.
    """
    from itertools import chain as itertools_chain

    import xgboost as xgb

    extractor = RandomFeatureExtractor()
    model = XGBModel(extractor=extractor, num_warmup_samples=10)
    update_sample_count = 20
    predict_sample_count = 30
    model.update(
        TuneContext(),
        [_dummy_candidate() for i in range(update_sample_count)],
        [_dummy_result() for i in range(update_sample_count)],
    )
    model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)])
    with tempfile.NamedTemporaryFile() as path:
        random_state = model.extractor.random_state
        model.save(path.name)
        old_booster = model.booster
        xs = [
            x.numpy().astype("float32")
            for x in extractor.extract_from(
                TuneContext(),
                [_dummy_candidate() for i in range(predict_sample_count)],
            )
        ]
        d_test = PackSum(xs=xs, ys=None)
        pred1 = old_booster.predict(d_test.dmatrix)

        # Reload and retrain a fresh booster from the cached training data.
        model.extractor.random_state = random_state
        model.load(path.name)
        d_train = PackSum(
            xs=list(itertools_chain.from_iterable([g.features for g in model.data.values()])),
            ys=np.concatenate(
                [g.min_cost / g.costs for g in model.data.values()],
                axis=0,
            ),
        )

        def obj(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"):
            return d_train.obj_square_error(ys_pred)

        def rmse(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"):
            return d_train.rmse(ys_pred)

        def avg_peak_score(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"):
            return d_train.average_peak_score(ys_pred, model.average_peak_n)

        new_booster = xgb.train(
            model.config.to_dict(),
            d_train.dmatrix,
            num_boost_round=10000,
            obj=obj,
            callbacks=[
                partial(
                    _get_custom_call_back(
                        early_stopping_rounds=model.early_stopping_rounds,
                        verbose_eval=model.verbose_eval,
                        fevals=[rmse, avg_peak_score],
                        evals=[(d_train.dmatrix, "tr")],
                        cvfolds=None,
                    )
                )
            ],
        )
        xs = [
            x.numpy().astype("float32")
            for x in extractor.extract_from(
                TuneContext(),
                [_dummy_candidate() for i in range(predict_sample_count)],
            )
        ]
        d_test = PackSum(xs=xs, ys=None)
        pred2 = new_booster.predict(d_test.dmatrix)
    assert np.allclose(pred1, pred2, rtol=1e-3, atol=1e-3)
# Standard TVM testing entry point (fix: removed trailing "|" extraction artifact).
if __name__ == "__main__":
    tvm.testing.main()
"""Test Meta Schedule Database""" |
import os.path as osp |
import tempfile
from typing |
import Callable, Optional, List |
import tvm |
import tvm.testing
from tvm.target |
import Target
from tvm |
import meta_schedule as ms
from tvm.meta_schedule.database |
import TuningRecord, Workload
from tvm |
import tir
from tvm.ir.module |
import IRModule
from tvm.script |
import tir as T
from tvm.tir |
import Schedule
# 1024x1024x1024 fp32 matmul in TVMScript; fix: restored the block structure
# that the upstream extraction flattened (TVMScript requires exact indentation).
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# 16x16 matmul followed by ReLU in TVMScript; fix: restored flattened indentation.
@tvm.script.ir_module
class MatmulRelu:
    @T.prim_func
    def main(a: T.handle, b: T.handle, d: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")
        D = T.match_buffer(d, (16, 16), "float32")
        C = T.alloc_buffer((16, 16), "float32")
        for i, j, k in T.grid(16, 16, 16):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
        for i, j in T.grid(16, 16):
            with T.block("relu"):
                vi, vj = T.axis.remap("SS", [i, j])
                D[vi, vj] = T.max(C[vi, vj], 0.0)
def _schedule_matmul(sch: Schedule):
    """Apply a fixed split/reorder tiling to the "matmul" block of *sch*."""
    block = sch.get_block("matmul")
    i, j, k = sch.get_loops(block=block)
    i_0, i_1, i_2, i_3 = sch.split(loop=i, factors=[1, 1, 2, 512])
    j_0, j_1, j_2, j_3 = sch.split(loop=j, factors=[1, 512, 1, 2])
    k_0, k_1 = sch.split(loop=k, factors=[256, 4])
    sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3)
def _create_schedule(mod: IRModule, sch_fn: Callable[[Schedule], None]) -> Schedule:
    """Create a fully-checked Schedule for *mod* and apply *sch_fn* to it.

    Fix: the return statement was split across two corrupted lines
    ("ret" / "urn sch") upstream; rejoined here.
    """
    sch = tir.Schedule(mod=mod, debug_mask="all")
    sch_fn(sch)
    return sch
def _create_tmp_database(tmpdir: str) -> ms.database.JSONDatabase:
    """Create a JSONDatabase whose two backing files live under *tmpdir*."""
    return ms.database.JSONDatabase(
        osp.join(tmpdir, "workloads.json"),
        osp.join(tmpdir, "tuning_records.json"),
    )
def _equal_record(a: ms.database.TuningRecord, b: ms.database.TuningRecord):
    """Assert that two tuning records are observably identical."""
    assert str(a.trace) == str(b.trace)
    assert str(a.run_secs) == str(b.run_secs)
    assert str(a.target) == str(b.target)
    assert tvm.ir.structural_equal(a.workload.mod, b.workload.mod)
    for lhs, rhs in zip(a.args_info, b.args_info):
        assert str(lhs.as_json()) == str(rhs.as_json())
@ms.utils.derived_object
class PyMemoryDatabaseDefault(ms.database.PyDatabase):
    """In-memory database relying on PyDatabase's default query methods."""

    def __init__(self):
        super().__init__()
        self.tuning_records_: List[TuningRecord] = []
        self.workloads_: List[Workload] = []

    def has_workload(self, mod: IRModule) -> bool:
        for workload in self.workloads_:
            if tvm.ir.structural_equal(mod, workload.mod):
                return True
        # Fix: the original fell off the end and returned None for a miss;
        # return an explicit bool as the signature promises.
        return False

    def commit_workload(self, mod: IRModule) -> ms.database.Workload:
        if self.has_workload(mod):
            for workload in self.workloads_:
                if tvm.ir.structural_equal(mod, workload.mod):
                    return workload
        else:
            workload = ms.database.Workload(mod)
            self.workloads_.append(workload)
            return workload

    def commit_tuning_record(self, record: TuningRecord) -> None:
        self.tuning_records_.append(record)

    def get_all_tuning_records(self) -> List[TuningRecord]:
        return self.tuning_records_

    def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]:
        # Records matching the workload, sorted by mean run time (missing -> 1e9).
        return sorted(
            list(
                filter(
                    lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod),
                    self.tuning_records_,
                )
            ),
            key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9,
        )[:top_k]

    def __len__(self) -> int:
        return len(self.tuning_records_)
@ms.utils.derived_object
class PyMemoryDatabaseOverride(ms.database.PyDatabase):
    """In-memory database that overrides the query_* methods.

    query_tuning_record deliberately returns the SECOND-best record when two or
    more exist, so tests can tell the override apart from the default behavior.
    """

    def __init__(self):
        super().__init__()
        self.tuning_records_: List[TuningRecord] = []
        self.workloads_: List[Workload] = []

    def has_workload(self, mod: IRModule) -> bool:
        for workload in self.workloads_:
            if tvm.ir.structural_equal(mod, workload.mod):
                return True
        # Fix: the original fell off the end and returned None for a miss;
        # return an explicit bool as the signature promises.
        return False

    def commit_workload(self, mod: IRModule) -> ms.database.Workload:
        if self.has_workload(mod):
            for workload in self.workloads_:
                if tvm.ir.structural_equal(mod, workload.mod):
                    return workload
        else:
            workload = ms.database.Workload(mod)
            self.workloads_.append(workload)
            return workload

    def commit_tuning_record(self, record: TuningRecord) -> None:
        self.tuning_records_.append(record)

    def get_all_tuning_records(self) -> List[TuningRecord]:
        return self.tuning_records_

    def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]:
        return sorted(
            list(
                filter(
                    lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod),
                    self.tuning_records_,
                )
            ),
            key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9,
        )[:top_k]

    def __len__(self) -> int:
        return len(self.tuning_records_)

    def query_tuning_record(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[TuningRecord]:
        if self.has_workload(mod):
            records = self.get_top_k(self.commit_workload(mod), 2)
            if len(records) == 1:
                return records[0]
            elif len(records) == 2:
                return records[1]
        return None

    def query_schedule(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[Schedule]:
        # Fix: this call was split across two corrupted lines
        # ("query_tuning_r" / "ecord"); rejoined here.
        record = self.query_tuning_record(mod, target, workload_name)
        if record is not None:
            sch = Schedule(record.workload.mod)
            record.trace.apply_to_schedule(sch, remove_postproc=False)
            return sch
        return None

    def query_ir_module(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[IRModule]:
        record = self.query_tuning_record(mod, target, workload_name)
        if record is not None:
            sch = Schedule(record.workload.mod)
            record.trace.apply_to_schedule(sch, remove_postproc=False)
            return sch.mod
        return None
def test_meta_schedule_tuning_record_round_trip():
    """A TuningRecord survives as_json/from_json unchanged."""
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        round_tripped = ms.database.TuningRecord.from_json(record.as_json(), workload)
        _equal_record(record, round_tripped)
def test_meta_schedule_database_create():
    """Creating a JSONDatabase materializes both backing files on disk."""
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        assert osp.exists(database.path_workload)
        assert osp.exists(database.path_tuning_record)
def test_meta_schedule_database_has_workload():
    """has_workload is True only for modules that were committed."""
    mod: IRModule = Matmul
    missing_mod: IRModule = MatmulRelu
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            # Fix: this call was split across two corrupted lines ("fu"/"nc=...").
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        assert len(database) == 1
        assert database.has_workload(mod)
        assert not database.has_workload(missing_mod)
def test_meta_schedule_database_add_entry():
    """A committed record is returned intact by get_top_k."""
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        assert len(database) == 1
        (fetched,) = database.get_top_k(workload, 3)
        _equal_record(fetched, record)
def test_meta_schedule_database_missing():
    """Querying a workload with no records yields an empty top-k list."""
    mod: IRModule = Matmul
    mod_2: IRModule = MatmulRelu
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        workload_2 = database.commit_workload(mod_2)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        assert not database.get_top_k(workload_2, 3)
def test_meta_schedule_database_sorting():
    """get_top_k returns the records with the lowest mean run time.

    Fix: one ArgInfo.from_prim_func call was split across two corrupted lines;
    the six near-identical record literals are also deduplicated into a loop.
    """
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        token = database.commit_workload(mod)
        trace = _create_schedule(mod, _schedule_matmul).trace
        all_run_secs = [
            [7.0, 8.0, 9.0],
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [1.1, 1.2, 600.0],
            [1.0, 100.0, 6.0],
            [4.0, 9.0, 8.0],
        ]
        records = [
            ms.database.TuningRecord(
                trace,
                token,
                run_secs,
                tvm.target.Target("llvm"),
                ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
            )
            for run_secs in all_run_secs
        ]
        for record in records:
            database.commit_tuning_record(record)
        ret = database.get_top_k(token, 2)
        assert len(ret) == 2
        # The two best records (mean 2.0 and 5.0) may come back in either order.
        try:
            _equal_record(ret[0], records[2])
            _equal_record(ret[1], records[1])
        except AssertionError:
            _equal_record(ret[0], records[1])
            _equal_record(ret[1], records[2])
def test_meta_schedule_database_reload():
    """A second JSONDatabase on the same files sees all committed records.

    Fix: removed the stray "|" artifact on one record line; the three
    near-identical record literals are deduplicated into a loop.
    """
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        token = database.commit_workload(mod)
        trace = _create_schedule(mod, _schedule_matmul).trace
        records = [
            ms.database.TuningRecord(
                trace,
                token,
                run_secs,
                tvm.target.Target("llvm"),
                ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
            )
            for run_secs in ([7.0, 8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0])
        ]
        for record in records:
            database.commit_tuning_record(record)
        new_database = ms.database.JSONDatabase(
            path_workload=database.path_workload,
            path_tuning_record=database.path_tuning_record,
        )
        token = new_database.commit_workload(mod)
        ret = new_database.get_top_k(token, 2)
        assert len(ret) == 2
        # The two best records may come back in either order.
        try:
            _equal_record(ret[0], records[2])
            _equal_record(ret[1], records[1])
        except AssertionError:
            _equal_record(ret[0], records[1])
            _equal_record(ret[1], records[2])
def test_meta_schedule_database_union():
    """UnionDatabase picks the best record; OrderedUnionDatabase the first hit.

    Fix: one tuple-unpacking assignment was split across two corrupted lines
    ("(run_sec," / ") = query(db_1)"); rejoined here.
    """
    mod: IRModule = Matmul
    target = tvm.target.Target("llvm")
    arg_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
    db_1 = ms.database.MemoryDatabase()
    db_2 = ms.database.MemoryDatabase()
    trace = _create_schedule(mod, _schedule_matmul).trace

    def query(db):
        return db.query_tuning_record(mod=mod, target=target, workload_name="main").run_secs

    def commit_record(db, run_sec):
        db.commit_tuning_record(
            ms.database.TuningRecord(
                trace,
                workload=db.commit_workload(mod),
                run_secs=[run_sec],
                target=target,
                args_info=arg_info,
            )
        )

    commit_record(db_1, 1.0)
    (run_sec,) = query(db_1)
    assert run_sec.value == 1.0
    commit_record(db_2, 0.5)
    (run_sec,) = query(db_2)
    assert run_sec.value == 0.5
    # Union: best across members. OrderedUnion: first member that answers.
    (run_secs,) = query(ms.database.UnionDatabase(db_1, db_2))
    assert run_secs.value == 0.5
    (run_secs,) = query(ms.database.OrderedUnionDatabase(db_1, db_2))
    assert run_secs.value == 1.0
def test_meta_schedule_pydatabase_default_query():
    """Default PyDatabase.query dispatches to record/schedule/ir_module kinds."""
    mod: IRModule = Matmul
    target = tvm.target.Target("llvm")
    arg_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
    db = PyMemoryDatabaseDefault()
    sch = _create_schedule(mod, _schedule_matmul)
    trace = sch.trace

    def query(db, mod, target, kind):
        return db.query(mod=mod, target=target, workload_name="main", kind=kind)

    def commit_record(trace, db, run_sec):
        db.commit_tuning_record(
            ms.database.TuningRecord(
                trace,
                workload=db.commit_workload(mod),
                run_secs=[run_sec],
                target=target,
                args_info=arg_info,
            )
        )

    # With one record committed, every query kind reflects the tiled schedule.
    commit_record(trace, db, 1.0)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 1.0
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, sch.mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, sch.mod)
    # A faster, untiled record takes over under the default (best-first) query.
    commit_record(Schedule(mod).trace, db, 0.2)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 0.2
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, mod)
def test_meta_schedule_pydatabase_override_query():
    """Overridden query_* methods (second-best record) take effect through query.

    Fix: the arg_info assignment was split across two corrupted lines
    ("ArgInfo." / "from_prim_func(...)"); rejoined here.
    """
    mod: IRModule = Matmul
    target = tvm.target.Target("llvm")
    arg_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
    db = PyMemoryDatabaseOverride()
    sch = _create_schedule(mod, _schedule_matmul)
    trace = sch.trace

    def query(db, mod, target, kind):
        return db.query(mod=mod, target=target, workload_name="main", kind=kind)

    def commit_record(trace, db, run_sec):
        db.commit_tuning_record(
            ms.database.TuningRecord(
                trace,
                workload=db.commit_workload(mod),
                run_secs=[run_sec],
                target=target,
                args_info=arg_info,
            )
        )

    commit_record(trace, db, 1.14)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 1.14
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, sch.mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, sch.mod)
    # The override returns the SECOND-best record, so the original 1.14 wins.
    commit_record(Schedule(mod).trace, db, 0.514)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 1.14
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, sch.mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, sch.mod)
def test_meta_schedule_pydatabase_current():
    """Entering a database's context makes it Database.current()."""
    database = PyMemoryDatabaseDefault()
    with database:
        assert ms.database.Database.current() == database
# Standard TVM testing entry point (fix: removed trailing "|" extraction artifact).
if __name__ == "__main__":
    tvm.testing.main()
import re
from typing |
import List |
import numpy as np
from tvm.meta_schedule |
import TuneContext
from tvm.meta_schedule.feature_extractor |
import PyFeatureExtractor
from tvm.meta_schedule.search_strategy |
import MeasureCandidate
from tvm.meta_schedule.utils |
import derived_object
from tvm.runtime.ndarray |
import array
def test_meta_schedule_feature_extractor():
    """A PyFeatureExtractor subclass returns its features through the FFI."""
    # Fix: removed stray "|" artifact after @derived_object.

    @derived_object
    class FancyFeatureExtractor(PyFeatureExtractor):
        def extract_from(
            self,
            context: TuneContext,
            candidates: List[MeasureCandidate],
        ) -> List[np.ndarray]:
            return [array(np.random.rand(4, 5))]

    extractor = FancyFeatureExtractor()
    features = extractor.extract_from(TuneContext(), [])
    assert len(features) == 1
    assert features[0].shape == (4, 5)
def test_meta_schedule_feature_extractor_as_string():
    """str() of a derived PyFeatureExtractor shows its class name and address."""
    # Fix: removed stray "|" artifact after @derived_object.

    @derived_object
    class NotSoFancyFeatureExtractor(PyFeatureExtractor):
        def extract_from(
            self,
            context: TuneContext,
            candidates: List[MeasureCandidate],
        ) -> List[np.ndarray]:
            return []

    feature_extractor = NotSoFancyFeatureExtractor()
    pattern = re.compile(r"meta_schedule.NotSoFancyFeatureExtractor\(0x[a-f|0-9]*\)")
    assert pattern.match(str(feature_extractor))
# Direct script entry point (fix: removed trailing "|" extraction artifact).
if __name__ == "__main__":
    test_meta_schedule_feature_extractor()
    test_meta_schedule_feature_extractor_as_string()
import sys
from typing |
import Callable, List |
import pytest |
import tvm |
import tvm.testing
from numpy.testing |
import assert_allclose
from tvm |
import meta_schedule as ms
from tvm |
import te, tir
from tvm.script |
import tir as T
N_FEATURES = 164
# 512x512x512 fp32 matmul prim_func; fix: restored the block structure that the
# upstream extraction flattened (TVMScript requires exact indentation).
@T.prim_func
def matmul(
    A: T.Buffer[(512, 512), "float32"],
    B: T.Buffer[(512, 512), "float32"],
    C: T.Buffer[(512, 512), "float32"],
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    for i0, i1, i2 in T.grid(512, 512, 512):
        with T.block("C"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C[i, j], A[i, k], B[k, j])
            T.writes(C[i, j])
            with T.init():
                C[i, j] = T.float32(0)
            C[i, j] = C[i, j] + A[i, k] * B[k, j]
# NOTE(review): several lines of this TVMScript module are truncated in this
# copy of the file (the `ax2`/`ax3` axis expressions, the `T.reads` call, and
# the final `T.if_then_else` chain all end mid-expression). Restore them from
# the upstream source before use — only comments were added here.
@tvm.script.ir_module
class LayoutTransform:
    # Layout-transform workload: repacks a (1, 16, 7, 7, 32) buffer into
    # (1, 1, 7, 7, 512) under a parallelized, auto-unrolled fused loop.
    @T.prim_func
    def main(placeholder: T.Buffer[(1, 16, 7, 7, 32), "float32"], placeholder_1: T.Buffer[(25088,), "float32"], T_layout_trans: T.Buffer[(1, 1, 7, 7, 512), "float32"]) -> None:
        T.func_attr({"tir.noalias": True, "global_symbol": "main"})
        for i0_i1_i2_i3_i4_fused in T.parallel(25088, annotations={"pragma_auto_unroll_max_step":64, "pragma_unroll_explicit":1}):
            with T.block("T_layout_trans_1"):
                ax0 = T.axis.spatial(1, 0)
                ax1 = T.axis.spatial(1, 0)
                ax2 = T.axis.spatial(7, i0_i1_i2_i3_i4_fused
                ax3 = T.axis.spatial(7, i0_i1_i2_i3_i4_fused % 3584
                ax4 = T.axis.spatial(512, i0_i1_i2_i3_i4_fused % 512)
                T.reads(placeholder[0, (ax4 * 49 + ax2 * 7 + ax3) % 25088
                T.writes(T_layout_trans[ax0, ax1, ax2, ax3, ax4])
                T_layout_trans[ax0, ax1, ax2, ax3, ax4] = T.if_then_else(ax0 < 1 and ax1 * 512 + ax4 < 512 and ax2 < 7 and ax3 < 7, T.Select(T.float32(0) < T.if_then_else(0 < 1 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088
def _make_context(target) -> ms.TuneContext:
    """Build a single-threaded TuneContext for the given target."""
    ctx = ms.TuneContext(target=target, num_threads=1)
    return ctx
def _make_candidate(f_sch: Callable[[], tir.Schedule]) -> ms.MeasureCandidate:
    """Wrap the schedule produced by `f_sch` in a MeasureCandidate (no arg info)."""
    sch = f_sch()
    return ms.MeasureCandidate(sch=sch, args_info=[])
def _feature_names(
    buffers_per_store: int = 5,
    arith_intensity_curve_num_samples: int = 10,
) -> List[str]:
    """Return the ordered names of all per-store features.

    Fix: in this copy of the file the `def` keyword was split across two lines
    and a stray `|` sat inside the name list — both syntax errors, repaired.

    Parameters
    ----------
    buffers_per_store : int
        Number of buffer-access feature groups emitted per store.
    arith_intensity_curve_num_samples : int
        Number of sample points on the arithmetic-intensity curve.

    Returns
    -------
    List[str]
        Exactly N_FEATURES names, in emission order.
    """
    # Arithmetic-op counts, vec/unroll/parallel groups, then GPU bindings.
    result = [
        "float_mad", "float_addsub", "float_mul", "float_divmod",
        "float_cmp", "float_mathfunc", "float_otherfunc",
        "int_mad", "int_addsub", "int_mul", "int_divmod",
        "int_cmp", "int_mathfunc", "int_otherfunc",
        "bool_op", "select_op",
        "vec_num", "vec_prod", "vec_len",
        "vec_type.kPosNone", "vec_type.kPosInnerSpatial",
        "vec_type.kPosMiddleSpatial", "vec_type.kPosOuterSpatial",
        "vec_type.kPosInnerReduce", "vec_type.kPosMiddleReduce",
        "vec_type.kPosOuterReduce", "vec_type.kPosMixed",
        "unroll_num", "unroll_prod", "unroll_len",
        "unroll_type.kPosNone", "unroll_type.kPosInnerSpatial",
        "unroll_type.kPosMiddleSpatial", "unroll_type.kPosOuterSpatial",
        "unroll_type.kPosInnerReduce", "unroll_type.kPosMiddleReduce",
        "unroll_type.kPosOuterReduce", "unroll_type.kPosMixed",
        "parallel_num", "parallel_prod", "parallel_len",
        "parallel_type.kPosNone", "parallel_type.kPosInnerSpatial",
        "parallel_type.kPosMiddleSpatial", "parallel_type.kPosOuterSpatial",
        "parallel_type.kPosInnerReduce", "parallel_type.kPosMiddleReduce",
        "parallel_type.kPosOuterReduce", "parallel_type.kPosMixed",
        "is_gpu",
        "blockIdx_x_len", "blockIdx_y_len", "blockIdx_z_len",
        "threadIdx_x_len", "threadIdx_y_len", "threadIdx_z_len",
        "vthread_len",
    ]
    # One group of buffer-access features per buffer slot.
    for i in range(buffers_per_store):
        result.extend(
            f"B{i}.{s}"
            for s in [
                "acc_type.kRead", "acc_type.kWrite", "acc_type.kReadWrite",
                "bytes", "unique_bytes", "lines", "unique_lines",
                "reuse_type.kLoopMultipleRead",
                "reuse_type.kSerialMultipleReadWrite",
                "reuse_type.kNoReuse",
                "reuse_dis_iter", "reuse_dis_bytes", "reuse_ct",
                "bytes_d_reuse_ct", "unique_bytes_d_reuse_ct",
                "lines_d_reuse_ct", "unique_lines_d_reuse_ct",
                "stride",
            ]
        )
    # Arithmetic-intensity curve samples.
    result.extend(f"arith_intensity_curve_{i}" for i in range(arith_intensity_curve_num_samples))
    # Allocation / outer-scope features.
    result.extend(
        [
            "alloc_size", "alloc_prod", "alloc_outer_prod", "alloc_inner_prod",
            "outer_prod", "num_loops", "auto_unroll_max_step",
        ]
    )
    assert len(result) == N_FEATURES
    return result
def _zip_feature(feature, names):
    """Pair each feature value with its name after validating the lengths."""
    assert feature.ndim == 1
    assert feature.shape[0] == N_FEATURES
    assert len(names) == N_FEATURES
    return [(name, value) for name, value in zip(names, feature)]
def _print_feature(feature, st, ed):
    """Print the named features in [st, ed) — a debugging aid."""
    named = _zip_feature(feature, _feature_names())
    for name, value in named[st:ed]:
        print("\t", name, value)
def test_cpu_matmul():
    """Check PerStoreFeature on a tiled, vectorized, parallelized CPU matmul."""

    def _sched() -> tir.Schedule:
        # Tile C's loops, then vectorize/parallelize/unroll so that the
        # vec/unroll/parallel feature groups are all exercised.
        sch = tir.Schedule(matmul, debug_mask="all")
        block = sch.get_block("C")
        i, j, k = sch.get_loops(block)
        i_outer, i_inner = sch.split(i, factors=[None, 16])
        j_outer, j_inner = sch.split(j, factors=[None, 8])
        sch.reorder(i_outer, j_outer, k, j_inner, i_inner)
        sch.vectorize(j_inner)
        sch.parallel(i_outer)
        sch.parallel(j_outer)
        sch.unroll(k)
        return sch

    extractor = ms.feature_extractor.PerStoreFeature()
    (feature,) = extractor.extract_from(
        _make_context(tvm.target.Target("llvm")),
        candidates=[_make_candidate(_sched)],
    )
    feature = feature.numpy()
    assert feature.shape == (1, N_FEATURES)
    f = feature[0]
    # (start, end, expected) per feature segment; layout follows _feature_names().
    segments = [
        # Float / int arithmetic-op counts.
        (0, 16, [0, 27, 27, 0, 0, 0, 0, 0, 29, 29, 0, 0, 0, 0, 0, 0]),
        # Vectorization group.
        (16, 27, [1.0, 3.169924, 3.169924, 0, 0, 0, 0, 0, 0, 0, 1]),
        # Unrolling group.
        (27, 38, [1.0, 9.002815, 9.002815, 0, 0, 0, 0, 0, 0, 0, 1]),
        # Parallelism group.
        (38, 49, [1.58496, 11.0007, 6.022368, 0, 0, 0, 0, 0, 0, 0, 1]),
        # is_gpu + GPU thread extents (trivial on llvm).
        (49, 57, [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
        # Buffer-access groups B0..B2; B3/B4 are unused (all zero).
        (
            57,
            75,
            [1, 0, 0, 29, 20, 27, 14, 1, 0, 0, 4.087463, 7.0552826,
             3.169925, 26, 17, 24, 11.0007038, 9.002815],
        ),
        (
            75,
            93,
            [0.0, 0.0, 1.0, 29.0, 20.000001907348633, 27.0,
             14.00008773803711, 1.0, 0.0, 0.0, 7.011227130889893,
             9.250298500061035, 9.002815246582031, 20.000001907348633,
             11.000703811645508, 18.0000057220459, 5.044394016265869,
             9.002815246582031],
        ),
        (
            93,
            111,
            [1.0, 0.0, 0.0, 29.0, 20.000001907348633, 19.000001907348633,
             14.00008773803711, 1.0, 0.0, 0.0, 1.0, 3.700439691543579,
             4.087462902069092, 25.0, 16.000022888183594, 15.000043869018555,
             10.001408194392809, 0.0],
        ),
        (111, 129, [0.0] * 18),
        (129, 147, [0.0] * 18),
        # Arithmetic-intensity curve.
        (
            147,
            157,
            [3.812599, 4.464822, 4.912349, 5.253426, 5.529086, 5.76043,
             5.959752, 6.134849, 6.290977, 6.431846],
        ),
        # Allocation / outer-loop features.
        (
            157,
            164,
            [20.000001907348633, 18.0000057220459, 1.0, 27.0, 27.0,
             2.5849626064300537, 0.0],
        ),
    ]
    for st, ed, desired in segments:
        assert_allclose(actual=f[st:ed], desired=desired, rtol=1e-5, atol=1e-5)
def test_cpu_fusion():
    """Check PerStoreFeature on two fused element-wise stores (A -> B -> C)."""

    @T.prim_func
    def func(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [64, 32], dtype="float32")
        B = T.match_buffer(b, [64, 32], dtype="float32")
        C = T.match_buffer(c, [64, 32], dtype="float32")
        for i, j in T.grid(64, 32):
            with T.block():
                T.reads([A[i, j], B[i, j]])
                T.writes([B[i, j], C[i, j]])
                with T.block("B"):
                    T.reads([A[i, j]])
                    T.writes([B[i, j]])
                    B[i, j] = A[i, j]
                with T.block("C"):
                    T.reads([B[i, j]])
                    T.writes([C[i, j]])
                    C[i, j] = B[i, j]

    extractor = ms.feature_extractor.PerStoreFeature()
    (feature,) = extractor.extract_from(
        _make_context(tvm.target.Target("llvm")),
        candidates=[_make_candidate(lambda: tir.Schedule(func, debug_mask="all"))],
    )
    feature = feature.numpy()
    # One feature row per store (two stores: into B and into C).
    assert feature.shape == (2, N_FEATURES)
    # No vec/unroll/parallel annotations -> the same one-hot in each group.
    no_annotation = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    per_row = [
        # Row 0 — presumably the first store (block "B"); confirm ordering
        # against PerStoreFeature if this ever changes.
        [
            (0, 16, [0.0] * 16),
            (16, 27, no_annotation),
            (27, 38, no_annotation),
            (38, 49, no_annotation),
            (49, 57, [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
            (
                57, 75,
                [1.0, 0.0, 0.0, 13.000176429748535, 13.000176429748535,
                 7.011227130889893, 7.011227130889893, 0.0, 0.0, 1.0,
                 0.0, 0.0, 0.0, 14.00008773803711, 14.00008773803711,
                 8.005624771118164, 8.005624771118164, 1.0],
            ),
            (
                75, 93,
                [0.0, 1.0, 0.0, 13.000176429748535, 13.000176429748535,
                 7.011227130889893, 7.011227130889893, 0.0, 0.0, 1.0,
                 0.0, 0.0, 0.0, 14.00008773803711, 14.00008773803711,
                 8.005624771118164, 8.005624771118164, 1.0],
            ),
            (93, 111, [0.0] * 18),
            (111, 129, [0.0] * 18),
            (129, 147, [0.0] * 18),
            (147, 157, [0.0] * 10),
            (
                157, 164,
                [13.000176, 11.000703811645508, 1.0, 11.000703811645508,
                 11.000703811645508, 1.5849624872207642, 0.0],
            ),
        ],
        # Row 1 — the second store; B is re-read here, so the reuse-related
        # features in the B0 group differ from row 0.
        [
            (0, 16, [0.0] * 16),
            (16, 27, no_annotation),
            (27, 38, no_annotation),
            (38, 49, no_annotation),
            (49, 57, [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
            (
                57, 75,
                [1.0, 0.0, 0.0, 13.000176429748535, 13.000176429748535,
                 7.011227130889893, 7.011227130889893, 0.0, 1.0, 0.0,
                 1.0, 4.087462902069092, 1.0, 13.000176429748535,
                 13.000176429748535, 7.011227130889893, 7.011227130889893,
                 1.0],
            ),
            (
                75, 93,
                [0.0, 1.0, 0.0, 13.000176429748535, 13.000176429748535,
                 7.011227130889893, 7.011227130889893, 0.0, 0.0, 1.0,
                 0.0, 0.0, 0.0, 14.00008773803711, 14.00008773803711,
                 8.005624771118164, 8.005624771118164, 1.0],
            ),
            (93, 111, [0.0] * 18),
            (111, 129, [0.0] * 18),
            (129, 147, [0.0] * 18),
            (147, 157, [0.0] * 10),
            (
                157, 164,
                [13.000176429748535, 11.000703811645508, 1.0,
                 11.000703811645508, 11.000703811645508,
                 1.5849624872207642, 0.0],
            ),
        ],
    ]
    for f, segments in zip(feature, per_row):
        for st, ed, desired in segments:
            assert_allclose(actual=f[st:ed], desired=desired, rtol=1e-5, atol=1e-5)
def test_gpu():
def _create_schedule():
func = matmul
sch = tir.Schedule(func, debug_mask="all")
c = sch.get_block("C")
c_local = sch.cache_write(c, 0, "local") |
i, j, k = sch.get_loops(c)
i0, i1, i2, i3, i4 = sch.split(i, factors=[None, 1, 16, 32, 1])
j0, j1, j2, j3, j4 = sch.split(j, factors=[None, 4, 1, 1, 16])
k0, k1, k2 = sch.split(k, factors=[None, 1, 2])
sch.reorder(
i0, j0,
i1, j1,
i2, j2,
k0,
k1,
i3, j3,
k2,
i4, j4,
)
i0_j0 = sch.fuse(i0, j0)
i1_j1 = sch.fuse(i1, j1)
i2_j2 = sch.fuse(i2, j2)
sch.bind(i0_j0, "blockIdx.x")
sch.bind(i1_j1, "vthread.x")
sch.bind(i2_j2, "threadIdx.x")
sch.reverse_compute_at(c_local, i2_j2)
a_shared = sch.cache_read(c, 1, "shared")
sch.compute_at(a_shared, k0)
_, _, _, _, a_i, a_j = sch.get_loops(a_shared)
a_ij = sch.fuse(a_i, a_j)
_, a_j = sch.split(a_ij, factors=[None, 16])
sch.bind(a_j, "threadIdx.x")
b_shared = sch.cache_read(c, 2, "shared")
sch.compute_at(b_shared, k0)
_, _, _, _, b_i, b_j = sch.get_loops(b_shared)
b_ij = sch.fuse(b_i, b_j)
_, b_j = sch.split(b_ij, factors=[None, 16])
sch.bind(b_j, "threadIdx.x")
sch.annotate(i0_j0, "pragma_auto_unroll_max_step", tir.IntImm("int32", 1024))
sch.annotate(i0_j0, "pragma_unroll_explicit", tir.IntImm("int32", 1))
return sch
extractor = ms.feature_extractor.PerStoreFeature()
(feature,) = extractor.extract_from(
_make_context(tvm.target.Target("cuda")),
candidates=[_make_candidate(_create_schedule)],
)
feature = feature.numpy()
assert feature.shape == (4, N_FEATURES)
f = feature[0]
assert_allclose(
actual=f[0:16],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
24.000000085991324, |
24.000000085991324,
24.000000085991324,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[16:27],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[27:38],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[38:49],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[49:57],
desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[57:75],
desired=[
1.0,
0.0,
0.0,
25.000000042995662,
20.000001375860553,
23.00000017198264,
14.000088052430122,
1.0,
0.0,
0.0,
18.00000550343433,
20.00562591970089,
2.321928094887362,
23.00000017198264,
18.00000550343433,
21.000000687930438,
12.0003521774803,
12.0003521774803,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[75:93],
desired=[
0.0,
1.0,
0.0,
25.000000042995662,
12.0003521774803,
23.00000017198264,
9.002815015607053,
1.0,
0.0,
0.0,
6.022367813028454,
11.98049663618346,
8.005624549193879,
17.000011006847668,
4.087462841250339,
15.000044026886828,
1.584962500721156,
4.087462841250339, |
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[93:111],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[111:129],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[129:147],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[147:157],
desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[157:164],
desired=[
12.0003521774803,
27.000000010748916,
17.000011006847668,
6.022367813028454,
23.00000017198264,
2.584962500721156,
10.001408,
],
rtol=1e-5,
atol=1e-5,
)
f = feature[1]
assert_allclose(
actual=f[0:16],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0, |
0.0,
0.0,
21.584962959341485,
21.584962959341485,
21.000000687930438,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[16:27],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[27:38],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[38:49],
desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[49:57],
desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[57:75],
desired=[
1.0,
0.0,
0.0,
22.00000034396526,
20.000001375860553,
20.000001375860553,
14.000088052430122,
1.0,
0.0,
0.0,
15.000044026886828,
20.17555076886471,
2.321928094887362,
20.000001375860553,
18.00000550343433,
18.00000550343433,
12.0003521774803,
4.087462841250339,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[75:93],
desired=[
0.0,
1.0,
0.0,
22.00000034396526,
9.002815015607053,
20.000001375860553,
3.169925001442312,
1.0,
0.0,
0.0,
3.169925001442312,
9.61654884377899,
8.005624549193879,
14.000088052430122,
1.584962500721156,
12.0003521774803 |
,
0.044394119358453436,
4.087462841250339,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[93:111],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[111:129],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[129:147],
desired=[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[147:157],
desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
actual=f[157:164],
desired=[
9.002815015607053,
24.000000085991324,
17.000011006847668,
3.169925001442312,
20.000001375860553,
2.584962500721156,
10.001408,
],
rtol=1e-5,
atol=1e-5,
)
f = feature[2]
assert_allclose(
actual=f[0:16],
desired=[
0.0,
27.0000 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.