axis0, axis1 = schedule[result_c].split(result_c.op.axis[0], factor=num_thread)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
target = tvm.target.Target(device)
if "cpu" not in target.keys:
schedule[result_c].bind(axis0, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis1, te.thread_axis("threadIdx.x"))
fmod = tvm.build(
schedule, [placeholder_a, placeholder_b, result_c], device, name="myfmod"
)
value_n = 1024
a_np = (np.random.uniform(size=value_n) * 256).astype(placeholder_a.dtype)
b_np = (np.random.uniform(size=value_n) * 256).astype(placeholder_b.dtype)
b_np += (b_np < 2.0) * 2
a_np[np.abs(np.fmod(a_np, b_np)) < 1] += 1
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros(value_n, dtype=result_c.dtype), dev)
ftimer = fmod.time_evaluator(fmod.entry_name, dev, number=1)
_ = ftimer(buff_a, buff_b, buff_c).mean
np.testing.assert_allclose(
buff_c.numpy(), np.mod(buff_a.numpy(), buff_b.numpy()), rtol=1e-5
)
check_device("cuda")
check_device("opencl -device=intel_graphics")
check_device("metal")
run("float32")
@tvm.testing.requires_gpu
def test_multiple_cache_write():
"""Test multiple cache writes."""
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_a0 = te.placeholder((arr_length_tvm,), name="A0", dtype="float32")
placeholder_a1 = te.placeholder((arr_length_tvm,), name="A1", dtype="float32")
result_b0, result_b1 = te.compute(
(arr_length_tvm,),
lambda *i: (
placeholder_a0(*i) + placeholder_a1(*i),
placeholder_a0(*i) * placeholder_a1(*i),
),
name="B",
)
result_c = te.compute((arr_length_tvm,), lambda *i: result_b0(*i) + result_b1(*i), name="C")
schedule = te.create_schedule(result_c.op)
num_thread = 8
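# cache_write on a multi-output op caches both outputs in one stage; a single returned handle is enough to schedule it.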
cache_b0, _ = schedule.cache_write([result_b0, result_b1], "local")
axis0, axis1 = schedule[result_c].split(result_c.op.axis[0], factor=num_thread)
schedule[result_b0].compute_at(schedule[result_c], axis0)
schedule[cache_b0].compute_at(schedule[result_c], axis0)
schedule[result_c].bind(axis0, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis1, te.thread_axis("threadIdx.x"))
def check_device(device, host="stackvm"):
if not tvm.testing.device_enabled(host):
return
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
return
func = tvm.build(
schedule,
[placeholder_a0, placeholder_a1, result_c],
device,
host,
name="multiple_cache_write",
)
dev = tvm.device(device, 0)
buff_a0 = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a0.dtype), dev)
buff_a1 = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a1.dtype), dev)
buff_c = tvm.nd.array(np.zeros(arr_length, dtype=result_c.dtype), dev)
func(buff_a0, buff_a1, buff_c)
tvm.testing.assert_allclose(
buff_c.numpy(),
buff_a0.numpy() + buff_a1.numpy() + (buff_a0.numpy() * buff_a1.numpy()),
rtol=1e-5,
)
check_device("cuda", "llvm")
check_device("vulkan")
check_device("opencl")
def test_log_pow_llvm():
"""Test log pow using llvm to lower."""
size_var_n = te.size_var("n")
placeholder_a = te.placeholder((size_var_n,), name="A")
result_b = te.compute(
placeholder_a.shape, lambda *i: te.power(te.log(placeholder_a(*i)), 2.0), name="B"
)
schedule = te.create_schedule(result_b.op)
schedule[result_b].split(result_b.op.axis[0], factor=32)
if not tvm.testing.device_enabled("llvm"):
return
flog = tvm.build(schedule, [placeholder_a, result_b], "llvm", name="mylog")
dev = tvm.cpu(0)
size_var_n = 1028
buff_a = tvm.nd.array(np.random.uniform(size=size_var_n).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros(size_var_n, dtype=result_b.dtype), dev)
repeat = 10
ftimer = flog.time_evaluator(flog.entry_name, dev, number=1, repeat=repeat)
res = ftimer(buff_a, buff_b)
assert len(res.results) == repeat
tvm.testing.assert_allclose(buff_b.numpy(), np.power(np.log(buff_a.numpy()), 2.0), rtol=1e-5)
@tvm.testing.uses_gpu
def test_popcount():
"""Test popcount."""
def run(dtype):
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(1024)
placeholder_a = te.placeholder((arr_length_tvm,), name="A", dtype=dtype)
placeholder_b = te.compute(
placeholder_a.shape, lambda *i: tvm.tir.popcount(placeholder_a(*i)), name="B"
)
schedule = te.create_schedule(placeholder_b.op)
num_thread = 8
axis1, axis2 = schedule[placeholder_b].split(placeholder_b.op.axis[0], factor=num_thread)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
target = tvm.target.Target(device)
if "cpu" not in target.keys:
schedule[placeholder_b].bind(axis1, te.thread_axis("blockIdx.x"))
schedule[placeholder_b].bind(axis2, te.thread_axis("threadIdx.x"))
func = tvm.build(schedule, [placeholder_a, placeholder_b], device)
buff_a = tvm.nd.array(
np.random.randint(low=0, high=1000, size=arr_length, dtype=placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(shape=arr_length, dtype=placeholder_b.dtype), dev)
func(buff_a, buff_b)
tvm.testing.assert_allclose(
buff_b.numpy(), list(map(lambda x: bin(x).count("1"), buff_a.numpy())), rtol=1e-5
)
check_device("llvm")
check_device("cuda")
check_device("opencl")
if dtype == "uint32":
check_device("metal")
check_device("vulkan")
run("uint32")
run("uint64")
@tvm.testing.requires_gpu
def test_add():
"""Test addition."""
def run(dtype):
size_var_n = te.size_var("n")
placeholder_a = te.placeholder((size_var_n,), name="A", dtype=dtype)
placeholder_b = te.placeholder((size_var_n,), name="B", dtype=dtype)
result_c = te.compute(
placeholder_a.shape, lambda *i: placeholder_a(*i) + placeholder_b(*i), name="C"
)
schedule = te.create_schedule(result_c.op)
num_thread = 16
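# Each block covers num_thread * 4 elements; each thread handles 4 consecutive elements, which are then vectorized.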
axis_bx, axis_x = schedule[result_c].split(result_c.op.axis[0], factor=num_thread * 4)
axis_tx, axis_x = schedule[result_c].split(axis_x, nparts=num_thread)
_, axis_x = schedule[result_c].split(axis_x, factor=4)
schedule[result_c].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis_tx, te.thread_axis("threadIdx.x"))
schedule[result_c].vectorize(axis_x)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fadd = tvm.build(
schedule, [placeholder_a, placeholder_b, result_c], device, name="myadd"
)
n = 1024
buff_a = tvm.nd.array(
(np.random.uniform(size=n) * 256).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(
(np.random.uniform(size=n) * 256).astype(placeholder_b.dtype), dev
)
buff_c = tvm.nd.array(np.zeros(n, dtype=result_c.dtype), dev)
ftimer = fadd.time_evaluator(fadd.entry_name, dev, number=1)
_ = ftimer(buff_a, buff_b, buff_c).mean
tvm.testing.assert_allclose(buff_c.numpy(), buff_a.numpy() + buff_b.numpy(), rtol=1e-6)
check_device("opencl")
check_device("cuda")
if dtype == "float32":
check_device("metal")
check_device("vulkan")
run("float32")
run("int32")
run("int64")
run("uint64")
@tvm.testing.requires_gpu
def try_warp_memory():
"""Test using warp memory
skip this in default test because it requires higher arch"""
arr_size = 128
placeholder_a = te.placeholder((arr_size,), name="A")
result_b = te.compute((arr_size,), lambda i: placeholder_a[i] + 3, name="B")
warp_size = 32
schedule = te.create_schedule(result_b.op)
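# Stage A into "warp" memory so reads within a warp are served through warp-level shuffles rather than shared memory.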
cache_read_aa = schedule.cache_read(placeholder_a, "warp", [result_b])
axis_x0, axis_xi = schedule[result_b].split(result_b.op.axis[0], warp_size * 2)
_, axis_xi1 = schedule[result_b].split(axis_xi, factor=warp_size)
thread_axis_tx = te.thread_axis("threadIdx.x")
schedule[result_b].bind(axis_xi1, thread_axis_tx)
schedule[result_b].bind(axis_x0, te.thread_axis("blockIdx.x"))
schedule[cache_read_aa].compute_at(schedule[result_b], axis_x0)
axis_x0, axis_xi = schedule[cache_read_aa].split(schedule[cache_read_aa].op.axis[0], warp_size)
schedule[cache_read_aa].bind(axis_xi, thread_axis_tx)
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code)
return ptx
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
myfunc = tvm.build(schedule, [placeholder_a, result_b], device)
buff_a = tvm.nd.array(
(np.random.uniform(size=arr_size) * 256).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(arr_size, dtype=result_b.dtype), dev)
myfunc(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), buff_a.numpy() + 3, rtol=1e-6)
check_device("cuda")
if __name__ == "__main__":
test_exp()
try_warp_memory()
test_multiple_cache_write()
test_add()
test_log_pow_llvm()
test_popcount()
test_fmod()
"""Test elementwise ops on fpga."""
import os
import numpy as np
import tvm
import tvm.testing
from tvm import te
os.environ["XCL_EMULATION_MODE"] = "1"
os.environ["CL_CONTEXT_EMULATOR_DEVICE_INTELFPGA"] = "1"
@tvm.register_func
def tvm_callback_vhls_postproc(code):
"""Hook to inspect the Vivado HLS code before actually run it"""
print(code)
return code
def test_exp():
"""Test scheduling and running exp function."""
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_b = te.placeholder((arr_length_tvm,), name="A")
result_b = te.compute(placeholder_b.shape, lambda *i: te.exp(placeholder_b(*i)), name="B")
schedule = te.create_schedule(result_b.op)
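# nparts=1 yields a single outer iteration that is bound to the "pipeline" axis, marking the loop for FPGA pipelining.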
axis1, _ = schedule[result_b].split(result_b.op.axis[0], nparts=1)
schedule[result_b].bind(axis1, te.thread_axis("pipeline"))
def check_device(device, host="llvm"):
if not tvm.testing.device_enabled(device):
return
dev = tvm.device(device, 0)
fexp = tvm.build(schedule, [placeholder_b, result_b], device, host, name="myexp")
dev = tvm.device(device, 0)
buff_a = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_b.dtype), dev)
buff_b = tvm.nd.array(np.zeros(arr_length, dtype=result_b.dtype), dev)
fexp(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), np.exp(buff_a.numpy()), rtol=1e-5)
check_device("sdaccel")
if "AWS_PLATFORM" in os.environ:
check_device("sdaccel -device=" + os.environ.get("AWS_PLATFORM"))
check_device("aocl_sw_emu")
def test_multi_kernel():
"""Test scheduling with multiple computes."""
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_a = te.placeholder((arr_length_tvm,), name="A")
placeholder_b = te.placeholder((arr_length_tvm,), name="B")
result_c = te.compute(
placeholder_a.shape, lambda *i: placeholder_a(*i) + placeholder_b(*i), name="C"
)
result_d = te.compute(
placeholder_a.shape, lambda *i: placeholder_a(*i) + result_c(*i), name="D"
)
schedule = te.create_schedule(result_d.op)
axis1, _ = schedule[result_c].split(result_c.op.axis[0], nparts=1)
schedule[result_c].bind(axis1, te.thread_axis("pipeline"))
axis1, _ = schedule[result_d].split(result_d.op.axis[0], nparts=1)
schedule[result_d].bind(axis1, te.thread_axis("pipeline"))
def check_device(device, host="llvm"):
if not tvm.testing.device_enabled(device):
return
dev = tvm.device(device, 0)
fadd = tvm.build(
schedule, [placeholder_a, placeholder_b, result_c, result_d], device, host, name="myadd"
)
dev = tvm.device(device, 0)
buff_a = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_b.dtype), dev)
buff_c = tvm.nd.array(np.random.uniform(size=arr_length).astype(result_c.dtype), dev)
buff_d = tvm.nd.array(np.random.uniform(size=arr_length).astype(result_d.dtype), dev)
fadd(buff_a, buff_b, buff_c, buff_d)
tvm.testing.assert_allclose(buff_d.numpy(), buff_a.numpy() * 2 + buff_b.numpy(), rtol=1e-5)
check_device("sdaccel")
check_device("aocl_sw_emu")
if __name__ == "__main__":
test_exp()
test_multi_kernel()
"""Test scheduling and running a gemm!"""
import numpy as np
import tvm
import tvm.testing
from tvm import te
@tvm.testing.requires_gpu
def test_gemm():
"""Test the gemm!"""
dim1_length = 1024
dim_n = tvm.runtime.convert(dim1_length)
dim_m = dim_n
dim_l = dim_n
placeholder_a = te.placeholder((dim_n, dim_l), name="A")
placeholder_b = te.placeholder((dim_m, dim_l), name="B")
axis_k = te.reduce_axis((0, dim_l), name="k")
result_c = te.compute(
(dim_n, dim_m),
lambda ii, jj: te.sum(placeholder_a[ii, axis_k] * placeholder_b[jj, axis_k], axis=axis_k),
name="CC",
)
schedule = te.create_schedule(result_c.op)
scale = 8
num_thread = 8
block_factor = scale * num_thread
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_y = te.thread_axis("threadIdx.y")
cache_write = schedule.cache_write(result_c, "local")
cache_read_a = schedule.cache_read(placeholder_a, "shared", [cache_write])
cache_read_b = schedule.cache_read(placeholder_b, "shared", [cache_write])
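# Accumulate C in registers ("local") while staging tiles of A and B through shared memory.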
axis_by, axis_yi = schedule[result_c].split(result_c.op.axis[0], factor=block_factor)
axis_bx, axis_xi = schedule[result_c].split(result_c.op.axis[1], factor=block_factor)
schedule[result_c].reorder(axis_by, axis_bx, axis_yi, axis_xi)
schedule[result_c].bind(axis_by, block_y)
schedule[result_c].bind(axis_bx, block_x)
axis_ty, axis_yi = schedule[result_c].split(axis_yi, nparts=num_thread)
axis_tx, axis_xi = schedule[result_c].split(axis_xi, nparts=num_thread)
schedule[result_c].reorder(axis_ty, axis_tx, axis_yi, axis_xi)
schedule[result_c].bind(axis_ty, thread_y)
schedule[result_c].bind(axis_tx, thread_x)
axis_yo, axis_xo = cache_write.op.axis
schedule[cache_write].reorder(axis_k, axis_yo, axis_xo)
schedule[cache_write].compute_at(schedule[result_c], axis_tx)
schedule[cache_read_a].compute_at(schedule[cache_write], axis_k)
schedule[cache_read_b].compute_at(schedule[cache_write], axis_k)
schedule[cache_read_a].double_buffer()
schedule[cache_read_b].double_buffer()
axis_ty, axis_xi = schedule[cache_read_a].split(
schedule[cache_read_a].op.axis[0], nparts=num_thread
)
axis_tx, axis_xi = schedule[cache_read_a].split(axis_xi, nparts=num_thread)
schedule[cache_read_a].bind(axis_ty, thread_y)
schedule[cache_read_a].bind(axis_tx, thread_x)
axis_ty, axis_xi = schedule[cache_read_b].split(
schedule[cache_read_b].op.axis[0], nparts=num_thread
)
axis_tx, axis_xi = schedule[cache_read_b].split(axis_xi, nparts=num_thread)
schedule[cache_read_b].bind(axis_ty, thread_y)
schedule[cache_read_b].bind(axis_tx, thread_x)
schedule = schedule.normalize()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
with tvm.target.Target(device):
f = tvm.build(schedule, [placeholder_a, placeholder_b, result_c])
num_n = dim1_length
num_m = num_n
num_l = num_n
a_np = np.random.uniform(size=(num_n, num_l)).astype(placeholder_a.dtype)
b_np = np.random.uniform(size=(num_m, num_l)).astype(placeholder_b.dtype)
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros((num_n, num_m), dtype=result_c.dtype), dev)
ftimer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = ftimer(buff_a, buff_b, buff_c).mean
print("%s: exec=%g sec/op" % (dev, tcost))
tvm.testing.assert_allclose(buff_c.numpy(), np.dot(a_np, b_np.T), rtol=1e-5)
check_device("vulkan")
check_device("nvptx -mcpu=sm_20")
check_device("rocm")
check_device("metal")
check_device("opencl")
check_device("cuda")
if __name__ == "__main__":
test_gemm()
"""
Test the tuner
"""
import logging
import multiprocessing as mp
import textwrap
import tvm
import tvm.relay
import tvm.testing
from tvm import autotvm, te
from tvm.autotvm.measure import measure_methods
from tvm.autotvm.tuner import RandomTuner
from tvm.contrib import tar
from tvm.ir.instrument import pass_instrument
from tvm.ir.transform import PassContext
from tvm.target import Target
from tvm.tir.analysis import _ffi_api as _analysis_ffi_api
def setup_module():
"""Setup the module used for testing."""
@autotvm.template("testing/conv2d_no_batching")
def conv2d_no_batching(
batch_size, input_h, input_w, channels_in, channels_out, kernel_h, kernel_w
):
"""An example template for testing"""
assert batch_size == 1, "Only consider batch_size = 1 in this template"
data = te.placeholder((batch_size, channels_in, input_h, input_w), name="data")
kernel = te.placeholder((channels_out, channels_in, kernel_h, kernel_w), name="kernel")
axis_rc = te.reduce_axis((0, channels_in), name="rc")
axis_ry = te.reduce_axis((0, kernel_h), name="ry")
axis_rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch_size, channels_out, input_h - kernel_h + 1, input_w - kernel_w + 1),
lambda nn, ff, yy, xx: te.sum(
data[nn, axis_rc, yy + axis_ry, xx + axis_rx]
* kernel[ff, axis_rc, axis_ry, axis_rx],
axis=[axis_rc, axis_ry, axis_rx],
),
tag="conv2d_nchw",
)
schedule = te.create_schedule([conv.op])
output = conv
cache_write_ol = schedule.cache_write(conv, "local")
cache_read_aa = schedule.cache_read(data, "shared", [cache_write_ol])
cache_read_ww = schedule.cache_read(kernel, "shared", [cache_write_ol])
cache_read_al = schedule.cache_read(cache_read_aa, "local", [cache_write_ol])
cache_read_wl = schedule.cache_read(cache_read_ww, "local", [cache_write_ol])
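# Two-level staging: shared-memory tiles (AA/WW) feed per-thread register copies (AL/WL).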
axis_n, axis_f, axis_y, axis_x = schedule[output].op.axis
cfg = autotvm.get_config()
cfg.define_split("tile_f", cfg.axis(axis_f), num_outputs=4)
cfg.define_split("tile_y", cfg.axis(axis_y), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(axis_x), num_outputs=4)
axis_bf, axis_vf, axis_tf, axis_fi = cfg["tile_f"].apply(schedule, output, axis_f)
axis_by, axis_vy, axis_ty, axis_yi = cfg["tile_y"].apply(schedule, output, axis_y)
axis_bx, axis_vx, axis_tx, axis_xi = cfg["tile_x"].apply(schedule, output, axis_x)
kernel_scope = axis_n
schedule[output].bind(axis_bf, te.thread_axis("blockIdx.z"))
schedule[output].bind(axis_by, te.thread_axis("blockIdx.y"))
schedule[output].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[output].bind(axis_vf, te.thread_axis("vthread"))
schedule[output].bind(axis_vy, te.thread_axis("vthread"))
schedule[output].bind(axis_vx, te.thread_axis("vthread"))
schedule[output].bind(axis_tf, te.thread_axis("threadIdx.z"))
schedule[output].bind(axis_ty, te.thread_axis("threadIdx.y"))
schedule[output].bind(axis_tx, te.thread_axis("threadIdx.x"))
schedule[output].reorder(
axis_n,
axis_bf,
axis_by,
axis_bx,
axis_vf,
axis_vy,
axis_vx,
axis_tf,
axis_ty,
axis_tx,
axis_fi,
axis_yi,
axis_xi,
)
schedule[cache_write_ol].compute_at(schedule[output], axis_tx)
axis_n, axis_f, axis_y, axis_x = schedule[cache_write_ol].op.axis
axis_rc, axis_ry, axis_rx = schedule[cache_write_ol].op.reduce_axis
cfg.define_split("tile_rc", cfg.axis(axis_rc), num_outputs=3)
cfg.define_split("tile_ry", cfg.axis(axis_ry), num_outputs=3)
cfg.define_split("tile_rx", cfg.axis(axis_rx), num_outputs=3)
axis_rco, axis_rcm, axis_rci = cfg["tile_rc"].apply(schedule, cache_write_ol, axis_rc)
axis_ryo, axis_rym, axis_ryi = cfg["tile_rx"].apply(schedule, cache_write_ol, axis_ry)
axis_rxo, axis_rxm, axis_rxi = cfg["tile_ry"].apply(schedule, cache_write_ol, axis_rx)
schedule[cache_write_ol].reorder(
axis_rco,
axis_ryo,
axis_rxo,
axis_rcm,
axis_rym,
axis_rxm,
axis_rci,
axis_ryi,
axis_rxi,
axis_n,
axis_f,
axis_y,
axis_x,
)
schedule[cache_read_aa].compute_at(schedule[cache_write_ol], axis_rxo)
schedule[cache_read_ww].compute_at(schedule[cache_write_ol], axis_rxo)
schedule[cache_read_al].compute_at(schedule[cache_write_ol], axis_rxm)
schedule[cache_read_wl].compute_at(schedule[cache_write_ol], axis_rxm)
for load in [cache_read_aa, cache_read_ww]:
axis_n, axis_f, axis_y, axis_x = schedule[load].op.axis
fused = schedule[load].fuse(axis_n, axis_f, axis_y, axis_x)
axis_tz, fused = schedule[load].split(fused, nparts=cfg["tile_f"].size[2])
axis_ty, fused = schedule[load].split(fused, nparts=cfg["tile_y"].size[2])
axis_tx, fused = schedule[load].split(fused, nparts=cfg["tile_x"].size[2])
schedule[load].bind(axis_tz, te.thread_axis("threadIdx.z"))
schedule[load].bind(axis_ty, te.thread_axis("threadIdx.y"))
schedule[load].bind(axis_tx, te.thread_axis("threadIdx.x"))
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
cfg.define_knob("unroll_explicit", [0, 1])
schedule[output].pragma(
kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val
)
schedule[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
return schedule, [data, kernel, conv]
def teardown_module():
"""Remove the module from the autotvm task tables."""
del autotvm.task.task.TASK_TABLE["testing/conv2d_no_batching"]
def get_sample_task(target=tvm.target.cuda(), target_host=None):
"""return a sample task for testing"""
target, target_host = Target.canon_target_and_host(target, target_host)
task = autotvm.task.create(
"testing/conv2d_no_batching", args=(1, 7, 7, 512, 512, 3, 3), target=target
)
return task, target
def run_test_with_all_multiprocessing(func, *args, **kwargs):
"""Check all multiprocessing methods work for the tuning test.
In the past fork() had the most support at detriment to spawn() and forkserver().
As fork() is unavailable or unsafe on some platforms it is good to check all
available methods.
"""
for multiprocessing_method in mp.get_all_start_methods():
old_start_method = mp.get_start_method()
try:
mp.set_start_method(multiprocessing_method, force=True)
func(*args, **kwargs)
finally:
mp.set_start_method(old_start_method, force=True)
@tvm.testing.parametrize_targets("cuda", "opencl")
def test_tuning_gpu(target):
"""Test gpu tuning."""
def runner(target):
task, target = get_sample_task(target, None)
logging.info("task config space: %s", task.config_space)
measure_option = autotvm.measure_option(autotvm.LocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=20,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 20
successful_results = [
r
for r in results
if r.error_no == autotvm.MeasureErrorNo.NO_ERROR
or r.error_no == autotvm.MeasureErrorNo.INSTANTIATION_ERROR
]
assert len(successful_results) > 0, f"No successful tuning runs: {results!r}"
run_test_with_all_multiprocessing(runner, target)
@tvm.testing.parametrize_targets("cuda", "opencl")
def test_tuning_gpu_inherits_pass_context(target):
"""Autotvm tuner inherits PassContexts but also adds a gpu verification pass by default.
Test that using PassContext inherits passes properly but also runs gpu verification pass.
"""
@pass_instrument
class PassInstrumentChecker:
"""Pass Instrument that simply sees if it's been run."""
def __init__(self):
self.has_been_run = False
def run_after_pass(self, *_):
self.has_been_run = True
class GPUVerifyPassMocked:
"""Context manager that mocks tir.analysis.verify_gpu_code meant
to verify the pass has been run. This is done by patching the ffi func handles."""
FFI_FUNC_HANDLE = "tir.analysis.verify_gpu_code"
FUNC_NAME = "verify_gpu_code"
def __init__(self) -> None:
self.old_impl = tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
self.has_been_run = False
def gpu_verify_pass_mocked(self):
"""Get the replacement for the gpu verification pass."""
def _gpu_verify_pass_mocked(*args, **kwargs):
self.has_been_run = True
return self.old_impl(*args, **kwargs)
return _gpu_verify_pass_mocked
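# Patch both the FFI registry and the Python-side handle so callers resolve to the mocked function.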
def __enter__(self):
tvm._ffi.register_func(
self.FFI_FUNC_HANDLE, self.gpu_verify_pass_mocked(), override=True
)
setattr(
_analysis_ffi_api, self.FUNC_NAME, tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
)
def __exit__(self, *args, **kwargs):
tvm._ffi.register_func(self.FFI_FUNC_HANDLE, self.old_impl, override=True)
setattr(_analysis_ffi_api, self.FUNC_NAME, self.old_impl)
class OverwrittenBuildFunc(measure_methods._WrappedBuildFunc):
"""BuildFunc that mocks and patches as necessary to test proper passes are run."""
def __call__(self, measure_input, tmp_dir, **kwargs):
instrument = PassInstrumentChecker()
mocked_pass_checker = GPUVerifyPassMocked()
with mocked_pass_checker:
with PassContext(instruments=[instrument]):
regular_result = super().__call__(measure_input, tmp_dir, **kwargs)
assert instrument.has_been_run
assert mocked_pass_checker.has_been_run
return regular_result
class MockedLocalBuilder(measure_methods.LocalBuilder):
"""As measure_methods.LocalBuilder but overwrites the PassContext for testing."""
def __init__(
self,
timeout=10,
n_parallel=None,
build_kwargs=None,
build_func="default",
do_fork=False,
runtime=None,
):
super().__init__(timeout, n_parallel, build_kwargs, build_func, do_fork, runtime)
self.build_func = OverwrittenBuildFunc(tar.tar, runtime)
def runner(target):
task, target = get_sample_task(target, None)
logging.info("task config space: %s", task.config_space)
measure_option = autotvm.measure_option(MockedLocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=1,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 1
run_test_with_all_multiprocessing(runner, target)
def test_tuning_cpu():
"""Test tuning on cpu."""
def runner():
ir_mod = tvm.parser.fromtext(
textwrap.dedent(
"""
def @main(%a : Tensor[(1, 3, 32, 32), float32], %b : Tensor[(3, 3, 5, 5), float32]) {
nn.conv2d(%a, %b, data_layout="NCHW", kernel_layout="OIHW")
}
"""
)
)
tasks = autotvm.task.relay_integration.extract_from_program(
ir_mod, {}, tvm.target.create("llvm")
)
assert len(tasks) == 1, f"Extracted != 1 task from program: {tasks!r}"
task = tasks[0]
measure_option = autotvm.measure_option(autotvm.LocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=20,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 20
successful_results = [r for r in results if r.error_no == autotvm.MeasureErrorNo.NO_ERROR]
assert len(successful_results) > 0, f"No successful tuning runs: {results!r}"
run_test_with_all_multiprocessing(runner)
if __name__ == "__main__":
tvm.testing.main()
"""Test workload for lowering and build."""
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
@T.prim_func
def tensorcore_gemm(handle_a: T.handle, handle_b: T.handle, handle_c: T.handle) -> None:
match_buffer_a = T.match_buffer(handle_a, [1024, 1024], "float16")
match_buffer_b = T.match_buffer(handle_b, [1024, 1024], "float16")
match_buffer_c = T.match_buffer(handle_c, [1024, 1024], "float32")
for block_idx_x in T.thread_binding(0, 16, "blockIdx.x"):
for block_idx_y in T.thread_binding(0, 8, "blockIdx.y"):
with T.block():
axis_bx, axis_by = T.axis.remap("SS", [block_idx_x, block_idx_y])
shared_a = T.alloc_buffer([1024, 1024], "float16", scope="shared")
shared_b = T.alloc_buffer([1024, 1024], "float16", scope="shared")
wmma_a = T.alloc_buffer([1024, 1024], "float16", scope="wmma.matrix_a")
wmma_b = T.alloc_buffer([1024, 1024], "float16", scope="wmma.matrix_b")
wmma_c = T.alloc_buffer([1024, 1024], "float32", scope="wmma.accumulator")
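# Shared-memory staging buffers for A/B plus WMMA fragment buffers for the tensor core computation.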
for thread_ty in T.thread_binding(0, 2, "threadIdx.y"):
for thread_tz in T.thread_binding(0, 2, "threadIdx.z"):
for index_i, index_jj in T.grid(2, 4):
with T.block():
new_axis_vi = T.axis.S(64, axis_bx * 4 + thread_ty * 2 + index_i)
new_axis_vj = T.axis.S(64, axis_by * 8 + thread_tz * 4 + index_jj)
T.reads([])
T.writes(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
match_buffer_c0 = T.match_buffer(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[16 * 4, 1],
scope="wmma.accumulator",
offset_factor=1,
)
T.evaluate(
T.tvm_fill_fragment(
match_buffer_c0.data,
16,
16,
16,
index_i * 4 + index_jj,
T.float32(0),
dtype="handle",
)
)
for k_o in range(0, 32):
for thread_tx in T.thread_binding(0, 32, "threadIdx.x"):
for index_i0, index_j0 in T.grid(1, 4):
for index_j1 in T.vectorized(0, 4):
with T.block():
new_axis_vi = T.axis.S(
1024,
axis_bx * 64
+ thread_ty * 32
+ thread_tx
+ index_i0,
)
new_axis_vj = T.axis.S(
1024,
k_o * 32 + thread_tz * 16 + index_j0 * 4 + index_j1,
)
shared_a[new_axis_vi, new_axis_vj + 8] = match_buffer_a[
new_axis_vi, new_axis_vj
]
for index_i0, index_j0 in T.grid(2, 4):
for index_j1 in T.vectorized(0, 4):
with T.block():
new_axis_vi = T.axis.S(
1024,
axis_by * 128
+ thread_ty * 64
+ thread_tx * 2
+ index_i0,
)
new_axis_vj = T.axis.S(
1024,
k_o * 32 + thread_tz * 16 + index_j0 * 4 + index_j1,
)
shared_b[new_axis_vi, new_axis_vj + 8] = match_buffer_b[
new_axis_vi, new_axis_vj
]
for k_i in range(0, 2):
for index_i in range(0, 2):
with T.block():
new_axis_vi = T.axis.S(
64, axis_bx * 4 + thread_ty * 2 + index_i
)
axis_vk = T.axis.S(64, k_o * 2 + k_i)
T.reads(
shared_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
]
)
T.writes(
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
]
)
stride0 = T.var("int32")
stride1 = T.var("int32")
match_buffer_a0 = T.match_buffer(
shared_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
],
(16, 16 + 8),
"float16",
strides=[stride0, stride1],
scope="shared",
offset_factor=1,
)
wmma_a0 = T.match_buffer(
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_a",
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
wmma_a0.data,
16,
16,
16,
index_i,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
match_buffer_a0.data,
match_buffer_a0.elem_offset + 8,
match_buffer_a0.strides[0],
1,
dtype="handle",
),
match_buffer_a0.strides[0],
"row_major",
dtype="handle",
)
)
for index_jj in range(0, 4):
with T.block():
new_axis_vj = T.axis.S(
64, axis_by * 8 + thread_tz * 4 + index_jj
)
axis_vk = T.axis.S(64, k_o * 2 + k_i)
T.reads(
shared_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
]
)
T.writes(
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
]
)
stride0 = T.var("int32")
stride1 = T.var("int32")
match_buffer_b0 = T.match_buffer(
shared_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
],
(16, 16 + 8),
"float16",
strides=[stride0, stride1],
scope="shared",
offset_factor=1,
)
wmma_b0 = T.match_buffer(
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_b",
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
wmma_b0.data,
16,
16,
16,
index_jj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
match_buffer_b0.data,
match_buffer_b0.elem_offset + 8,
match_buffer_b0.strides[0],
1,
dtype="handle",
),
match_buffer_b0.strides[0],
"col_major",
dtype="handle",
)
)
for index_i, index_jj in T.grid(2, 4):
with T.block():
new_axis_vi = T.axis.S(
64, axis_bx * 4 + thread_ty * 2 + index_i
)
new_axis_vj = T.axis.S(
64, axis_by * 8 + thread_tz * 4 + index_jj
)
axis_vk = T.axis.R(64, k_o * 2 + k_i)
T.reads(
[
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
]
)
T.writes(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
wmma_a1 = T.match_buffer(
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_a",
offset_factor=1,
)
wmma_b1 = T.match_buffer(
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_b",
offset_factor=1,
)
wmma_c1 = T.match_buffer(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[16 * 4, 1],
scope="wmma.accumulator",
offset_factor=1,
)
T.evaluate(
T.tvm_mma_sync(
wmma_c1.data,
index_i * 4 + index_jj,
wmma_a1.data,
index_i,
wmma_b1.data,
index_jj,
wmma_c1.data,
index_i * 4 + index_jj,
dtype="handle",
)
)
for index_i, index_jj in T.grid(2, 4):
with T.block():
new_axis_vi = T.axis.S(64, axis_bx * 4 + thread_ty * 2 + index_i)
new_axis_vj = T.axis.S(64, axis_by * 8 + thread_tz * 4 + index_jj)
T.reads(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
T.writes(
match_buffer_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
stride0 = T.var("int32")
stride1 = T.var("int32")
wmma_c2 = T.match_buffer(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[16 * 4, 1],
scope="wmma.accumulator",
offset_factor=1,
)
match_buffer_c1 = T.match_buffer(
match_buffer_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[stride0, stride1],
offset_factor=1,
)
T.evaluate(
T.tvm_store_matrix_sync(
wmma_c2.data,
16,
16,
16,
index_i * 4 + index_jj,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
match_buffer_c1.data,
match_buffer_c1.elem_offset,
match_buffer_c1.strides[0],
1,
dtype="handle",
),
match_buffer_c1.strides[0],
"row_major",
dtype="handle",
)
)
@tvm.testing.requires_cuda
def test_gemm_tensorcore():
"""Test running gemm on tensorcore."""
dev = tvm.device("cuda", 0)
a_np = np.random.uniform(size=(1024, 1024)).astype("float16")
b_np = np.random.uniform(size=(1024, 1024)).astype("float16")
c_np = np.dot(a_np.astype("float32"), b_np.T.astype("float32"))
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros((1024, 1024), dtype="float32"), dev)
myfunc = tvm.build(tensorcore_gemm, target="cuda", name="dense")
myfunc(buff_a, buff_b, buff_c)
tvm.testing.assert_allclose(buff_c.numpy(), c_np, rtol=1e-3)
evaluator = myfunc.time_evaluator(myfunc.entry_name, dev, number=100)
time_elapsed = evaluator(buff_a, buff_b, buff_c).mean
num_flops = 2 * 1024 * 1024 * 1024
gflops = num_flops / (time_elapsed * 1e3) / 1e6
print("gemm with tensor core: %f ms" % (time_elapsed * 1e3))
print("GFLOPS: %f" % gflops)
if __name__ == "__main__":
test_gemm_tensorcore()
"""Test scheduling of reduction operations."""
import pytest
import numpy as np
import tvm
from tvm import te, topi
from tvm.driver.build_module import schedule_to_module
import tvm.testing
import tvm.topi.testing
@tvm.testing.requires_gpu
def test_reduce_prims():
"""Test reduction operations."""
def test_prim(reducer, np_reducer):
size_var_n = tvm.te.size_var("n")
size_var_m = tvm.te.size_var("m")
placeholder_a = te.placeholder((size_var_n, size_var_m), name="A")
result_r = te.compute((size_var_n,), lambda i: tvm.tir.Select((i > 1), 1, 0), name="R")
axis_k = te.reduce_axis((0, size_var_m))
result_b = te.compute(
(size_var_n,),
lambda i: reducer(placeholder_a[i, axis_k], axis=axis_k, where=(result_r[i] == 1)),
name="B",
)
schedule = te.create_schedule(result_b.op)
num_thread = 1
axis_x0, axis_x1 = schedule[result_b].split(result_b.op.axis[0], factor=num_thread)
schedule[result_b].bind(axis_x0, te.thread_axis("blockIdx.x"))
schedule[result_b].bind(axis_x1, te.thread_axis("threadIdx.x"))
schedule[result_r].compute_inline()
def check_device(device, host="llvm"):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
freduce = tvm.build(
schedule,
args=[placeholder_a, result_b],
target=tvm.target.Target(device, host),
name="myreduce",
)
num_n = 1028
num_m = 129
buff_x = tvm.nd.array(
np.random.uniform(size=(num_n, num_m)).astype(placeholder_a.dtype), dev
)
buff_y = tvm.nd.array(np.zeros(num_n, dtype=result_b.dtype), dev)
freduce(buff_x, buff_y)
npy = buff_y.numpy()
npy[:2] = 0
res = np_reducer(buff_x.numpy(), axis=1)
res[:2] = 0
tvm.testing.assert_allclose(npy, res, rtol=1e-4)
check_device("metal")
check_device("vulkan") |
check_device("cuda")
check_device("opencl")
check_device("rocm")
test_prim(te.sum, np.sum)
test_prim(tvm.te.min, np.amin)
test_prim(tvm.te.max, np.amax)
def test_init_imm():
"""Test initial values which are immutable in reduction ops."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length,), name="A")
axis_k = te.reduce_axis((0, arr_length))
result_b = te.compute(
(), lambda: te.sum(placeholder_a[axis_k], axis=axis_k, init=10.0), name="B"
)
schedule_s = te.create_schedule(result_b.op)
def check_target(target="llvm"):
if not tvm.runtime.enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule_s, args=[placeholder_a, result_b])
fsum = tvm.build(fapi, target=target, name="mysum")
buff_a = tvm.nd.array(np.random.uniform(size=(num_n,)).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros((), dtype=result_b.dtype), dev)
fsum(buff_a, buff_b)
res = 10.0 + np.sum(buff_a.numpy(), axis=0)
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
def test_init():
"""Test initializer which is non-const."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length, arr_length), name="A")
placeholder_c = te.placeholder((arr_length, arr_length), name="C")
placeholder_i = te.placeholder((arr_length, arr_length), name="I")
axis_k = te.reduce_axis((0, arr_length))
result_b = te.compute(
(arr_length, arr_length),
lambda i, j: te.sum(
placeholder_a[i, axis_k] * placeholder_c[axis_k, j],
axis=axis_k,
init=placeholder_i[i, j],
),
name="B",
)
schedule = te.create_schedule(result_b.op)
def check_target(target="llvm"):
if not tvm.runtime.enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_c, placeholder_i, result_b])
print(fapi)
mmult = tvm.build(fapi, target=target, name="mmult")
buff_a = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_a.dtype), dev
)
buff_c = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_c.dtype), dev
)
buff_i = tvm.nd.array(np.random.uniform(size=(num_n, num_n)).astype(result_b.dtype), dev)
buf_b = tvm.nd.array(np.zeros((num_n, num_n), dtype=result_b.dtype), dev)
mmult(buff_a, buff_c, buff_i, buf_b)
res = buff_i.numpy() + np.matmul(buff_a.numpy(), buff_c.numpy())
tvm.testing.assert_allclose(buf_b.numpy(), res, rtol=1e-4)
check_target()
def test_rfactor():
"""Test rfactors."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length,), name="A")
axis_k = te.reduce_axis((0, arr_length))
placeholder_b = te.compute((), lambda: te.sum(placeholder_a[axis_k], axis=axis_k), name="B")
schedule = te.create_schedule(placeholder_b.op)
axis_kf, _ = schedule[placeholder_b].split(axis_k, nparts=4)
rfactor_bf = schedule.rfactor(placeholder_b, axis_kf)
schedule[rfactor_bf].parallel(rfactor_bf.op.axis[0])
def check_target(target="llvm"):
if not tvm.testing.device_enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_b])
fsum = tvm.build(fapi, target=target, name="mysum")
buff_a = tvm.nd.array(np.random.uniform(size=(num_n,)).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros((), dtype=placeholder_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=0)
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
def test_rfactor_init():
"""Test rfactors with constant inits.""" |
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length, arr_length), name="A")
placeholder_c = te.placeholder((arr_length, arr_length), name="C")
placeholder_i = te.placeholder((arr_length, arr_length), name="I")
axis_k = te.reduce_axis((0, arr_length))
result_b = te.compute(
(arr_length, arr_length),
lambda i, j: te.sum(
placeholder_a[i, axis_k] * placeholder_c[axis_k, j],
axis=axis_k,
init=placeholder_i[i, j],
),
name="B",
)
schedule = te.create_schedule(result_b.op)
axis_kf, _ = schedule[result_b].split(axis_k, nparts=4)
rfactor_bf = schedule.rfactor(result_b, axis_kf, 1)
schedule[rfactor_bf].parallel(rfactor_bf.op.axis[0])
def check_target(target="llvm"):
if not tvm.runtime.enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_c, placeholder_i, result_b])
print(fapi)
mmult = tvm.build(fapi, target=target, name="mmult")
buff_a = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_a.dtype), dev
)
buff_c = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_c.dtype), dev
)
buff_i = tvm.nd.array(np.random.uniform(size=(num_n, num_n)).astype(result_b.dtype), dev)
buff_b = tvm.nd.array(np.zeros((num_n, num_n), dtype=result_b.dtype), dev)
mmult(buff_a, buff_c, buff_i, buff_b)
res = buff_i.numpy() + np.matmul(buff_a.numpy(), buff_c.numpy())
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
def test_rfactor_factor_axis():
"""Test rfactors across axis."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length,), name="A")
axis_k = te.reduce_axis((0, arr_length))
placeholder_b = te.compute((), lambda: te.sum(placeholder_a[axis_k], axis=axis_k), name="B")
schedule = te.create_schedule(placeholder_b.op)
axis_kf, _ = schedule[placeholder_b].split(axis_k, nparts=4)
rfactor_bf = schedule.rfactor(placeholder_b, axis_kf, 0)
schedule[rfactor_bf].parallel(rfactor_bf.op.axis[0])
def check_target(target="llvm"):
if not tvm.testing.device_enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_b])
fsum = tvm.build(fapi, target=target, name="mysum")
buff_a = tvm.nd.array(np.random.uniform(size=(num_n,)).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros((), dtype=placeholder_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=0)
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
@tvm.testing.requires_gpu
def test_rfactor_threads():
"""Test rfactors across threads."""
num_n = 1027
num_m = 10
length_n = tvm.runtime.convert(num_n)
length_m = tvm.runtime.convert(num_m)
placeholder_a = te.placeholder((length_m, length_n), name="A")
axis_k = te.reduce_axis((0, length_n))
nthread = 16
result_b = te.compute(
(length_m,),
lambda i: te.sum(placeholder_a[i, axis_k], axis=axis_k, where=(i > 1)),
name="B",
)
schedule = te.create_schedule(result_b.op)
_, axis_kf = schedule[result_b].split(axis_k, factor=nthread)
rfactor_bf = schedule.rfactor(result_b, axis_kf)
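# rfactor materializes per-thread partial sums over the inner reduction split; the final stage combines them across threadIdx.x.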
axis_bx, axis_ty = schedule[result_b].split(schedule[result_b].op.axis[0], factor=nthread)
schedule[result_b].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_b].bind(axis_ty, te.thread_axis("threadIdx.y"))
axis_tx = schedule[result_b].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
schedule[result_b].bind(axis_tx, thread_x)
schedule[rfactor_bf].compute_at(schedule[result_b], axis_tx)
schedule[result_b].set_store_predicate(thread_x.var.equal(0))
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(schedule, args=[placeholder_a, result_b])
fsum = tvm.build(fapi, target=device, name="mysum")
buff_a = tvm.nd.array(
np.random.uniform(size=(num_m, num_n)).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(num_m, dtype=result_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=1)
res[:2] = 0
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target("vulkan")
check_target("cuda")
check_target("metal")
check_target("opencl")
check_target("rocm")
@tvm.testing.requires_gpu
def test_rfactor_elemwise_threads():
"""Test rfactor elemwise threads."""
num_n = 1025
num_m = 10
placeholder_a = te.placeholder((num_m, num_n), name="A")
axis_k = te.reduce_axis((0, num_n))
nthread = 16
result_b = te.compute(
(num_m,), lambda i: te.sum(placeholder_a[i, axis_k], axis=axis_k), name="B"
)
result_bb = te.compute((num_m,), lambda i: result_b[i] + 1, name="BB")
result_c = te.compute((num_m,), lambda i: result_bb[i] + 1, name="C")
schedule = te.create_schedule(result_c.op)
schedule[result_bb].compute_inline()
axis_bx, axis_ty = schedule[result_c].split(schedule[result_c].op.axis[0], factor=nthread)
_, axis_kf = schedule[result_b].split(axis_k, factor=nthread)
rfactor_bf = schedule.rfactor(result_b, axis_kf)
schedule[result_b].compute_at(schedule[result_c], axis_ty)
schedule[result_c].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis_ty, te.thread_axis("threadIdx.y"))
axis_tx = schedule[result_b].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
schedule[result_b].bind(axis_tx, thread_x)
schedule[rfactor_bf].compute_at(schedule[result_b], axis_tx)
schedule[result_b].set_store_predicate(thread_x.var.equal(0))
schedule[result_c].set_store_predicate(thread_x.var.equal(0))
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(schedule, args=[placeholder_a, result_c])
fsum = tvm.build(fapi, target=device, name="mysum")
buff_a = tvm.nd.array(
np.random.uniform(size=(num_m, num_n)).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(num_m, dtype=result_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=1) + 2
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target("vulkan")
check_target("cuda")
check_target("metal")
check_target("opencl")
check_target("rocm")
def test_argmax():
"""Test argmax."""
def fcombine(tensor_x, tensor_y):
lhs = tvm.tir.Select((tensor_x[1] >= tensor_y[1]), tensor_x[0], tensor_y[0])
rhs = tvm.tir.Select((tensor_x[1] >= tensor_y[1]), tensor_x[1], tensor_y[1])
return lhs, rhs
def fidentity(tensor1, tensor2):
return tvm.tir.const(-1, tensor1), tvm.te.min_value(tensor2)
argmax = te.comm_reducer(fcombine, fidentity, name="argmax")
size_var_m = te.size_var("m")
size_var_n = te.size_var("n")
idx = te.placeholder((size_var_m, size_var_n), name="idx", dtype="int32")
val = te.placeholder((size_var_m, size_var_n), name="val", dtype="float32")
axis_k = te.reduce_axis((0, size_var_n), "k")
result_t0, result_t1 = te.compute(
(size_var_m,), lambda i: argmax((idx[i, axis_k], val[i, axis_k]), axis=axis_k), name="T"
)
schedule = te.create_schedule(result_t0.op)
def check_target():
device = "cpu"
if not tvm.testing.device_enabled(device):
print("skip because %s is not |
enabled.." % device)
return
dev = tvm.device(device, 0)
fapi = tvm.lower(schedule, args=[idx, val, result_t0, result_t1])
fargmax = tvm.build(fapi, target="llvm", name="argmax")
height = 12
width = 16
np_idx = np.repeat(np.arange(width, dtype="int32").reshape(1, width), height, axis=0)
np_val = np.random.uniform(size=(height, width)).astype("float32")
np_res = np.argmax(np_val, axis=1)
nd_idx = tvm.nd.array(np_idx, dev)
nd_val = tvm.nd.array(np_val, dev)
nd_res0 = tvm.nd.array(np.zeros(height, dtype="int32"), dev)
nd_res1 = tvm.nd.array(np.zeros(height, dtype="float32"), dev)
fargmax(nd_idx, nd_val, nd_res0, nd_res1)
tvm.testing.assert_allclose(np_res, nd_res0.numpy())
check_target()
@tvm.testing.requires_gpu
def test_rfactor_argmax():
"""Test rfactor argmax"""
def fcombine(tensor0, tensor1):
lhs = tvm.tir.Select((tensor0[1] >= tensor1[1]), tensor0[0], tensor1[0])
rhs = tvm.tir.Select((tensor0[1] >= tensor1[1]), tensor0[1], tensor1[1])
return lhs, rhs
def fidentity(tensor0, tensor1):
return tvm.tir.const(-1, tensor0), tvm.te.min_value(tensor1)
argmax = te.comm_reducer(fcombine, fidentity, name="argmax")
num_width = 1027
num_height = 10
width = tvm.runtime.convert(num_width)
height = tvm.runtime.convert(num_height)
placeholder_a0 = te.placeholder((height, width), name="A0", dtype="int32")
placeholder_a1 = te.placeholder((height, width), name="A1", dtype="float32")
axis_k = te.reduce_axis((0, width))
result_b0, result_b1 = te.compute(
(height,),
lambda i: argmax((placeholder_a0[i, axis_k], placeholder_a1[i, axis_k]), axis=axis_k),
name="B",
)
schedule = te.create_schedule(result_b0.op)
nthread = 16
_, axis_kf = schedule[result_b0].split(axis_k, factor=nthread)
rfactor_bf0, _ = schedule.rfactor(result_b0, axis_kf)
axis_bx, axis_ty = schedule[result_b0].split(schedule[result_b0].op.axis[0], factor=nthread)
schedule[result_b0].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_b0].bind(axis_ty, te.thread_axis("threadIdx.y"))
axis_tx = schedule[result_b0].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
schedule[result_b0].bind(axis_tx, thread_x)
schedule[rfactor_bf0.op].compute_at(schedule[result_b0], axis_tx)
schedule[result_b0].set_store_predicate(thread_x.var.equal(0))
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(schedule, args=[placeholder_a0, placeholder_a1, result_b0, result_b1])
fargmax = tvm.build(fapi, target=device, name="argmax")
np_idx = np.repeat(
np.arange(num_width, dtype="int32").reshape(1, num_width), num_height, axis=0
)
np_val = np.random.uniform(size=(num_height, num_width)).astype("float32")
np_res = np.argmax(np_val, axis=1)
nd_idx = tvm.nd.array(np_idx, dev)
nd_val = tvm.nd.array(np_val, dev)
nd_res0 = tvm.nd.array(np.zeros(num_height, dtype="int32"), dev)
nd_res1 = tvm.nd.array(np.zeros(num_height, dtype="float32"), dev)
fargmax(nd_idx, nd_val, nd_res0, nd_res1)
tvm.testing.assert_allclose(np_res, nd_res0.numpy())
check_target("cuda")
check_target("vulkan")
check_target("rocm")
@tvm.testing.requires_gpu
def test_warp_reduction1():
"""Test warp reductions."""
nthx = 32
nthy = 4
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, nthx), "threadIdx.x")
thread_y = te.thread_axis((0, nthy), "threadIdx.y")
def check_target(device, m, n):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
placeholder_a = te.placeholder((m, n), name="A")
axis_k = te.reduce_axis((0, n))
placeholder_b = te.compute(
(m,), lambda i: te.max(placeholder_a[i][axis_k], axis=axis_k), name="B"
)
schedule = te.create_schedule(placeholder_b.op)
axis_k = schedule[placeholder_b].op.reduce_axis[0]
axis_ko, _ = schedule[placeholder_b].split(axis_k, nparts=nthx)
schedule[placeholder_b].bind(axis_ko, thread_x)
axis_xo, axis_xi = schedule[placeholder_b].split(
schedule[placeholder_b].op.axis[0], factor=nthy
)
schedule[placeholder_b].bind(axis_xi, thread_y)
schedule[placeholder_b].bind(axis_xo, block_x)
tvm.lower(schedule, [placeholder_a, placeholder_b], simple_mode=True)
func = tvm.build(schedule, [placeholder_a, placeholder_b], device, name="warp_reduction")
a_np = np.random.uniform(size=(m, n)).astype(placeholder_a.dtype)
b_np = np.zeros((m,), dtype=placeholder_a.dtype)
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
b_np = np.max(a_np, axis=1)
func(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), b_np, rtol=1e-3, atol=1e-3)
check_target("cuda", m=32, n=256)
check_target("cuda", m=10, n=20)
check_target("rocm", m=32, n=256)
check_target("rocm", m=10, n=20)
@tvm.testing.requires_gpu
def test_warp_reduction2():
"""Test warp reductions."""
def fcombine(tensor1, tensor2):
return tensor1[0] + tensor2[0], tensor1[1] * tensor2[1]
def fidentity(tensor1, tensor2):
return tvm.tir.const(0, tensor1), tvm.tir.const(1, tensor2)
add_mul_reducer = te.comm_reducer(fcombine, fidentity, name="add_mul_reducer")
num_m = 16
num_n = 256
placeholder_a0 = te.placeholder((num_m, num_n), name="A0", dtype="float32")
placeholder_a1 = te.placeholder((num_m, num_n), name="Al", dtype="float32")
axis_k = te.reduce_axis((0, num_n), "k")
result0, result1 = te.compute(
(num_m,),
lambda i: add_mul_reducer(
(placeholder_a0[i, axis_k], placeholder_a1[i, axis_k]), axis=axis_k
),
name="T",
)
nthdx, nthdy = 32, 2
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, nthdx), "threadIdx.x")
thread_y = te.thread_axis((0, nthdy), "threadIdx.y")
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
schedule = te.create_schedule(result0.op)
axis_ko, _ = schedule[result0].split(axis_k, nparts=nthdx)
axis_xo, axis_xi = schedule[result0].split(schedule[result0].op.axis[0], factor=nthdy)
schedule[result0].bind(axis_ko, thread_x)
schedule[result0].bind(axis_xi, thread_y)
schedule[result0].bind(axis_xo, block_x)
dev = tvm.device(device, 0)
a0_np = np.random.uniform(size=(num_m, num_n)).astype(placeholder_a0.dtype)
a1_np = np.random.uniform(size=(num_m, num_n)).astype(placeholder_a1.dtype)
t0_np = np.zeros((num_m,), dtype=placeholder_a0.dtype)
t1_np = np.zeros((num_m,), dtype=placeholder_a1.dtype)
buff_a0 = tvm.nd.array(a0_np, dev)
buff_a1 = tvm.nd.array(a1_np, dev)
buff_t0 = tvm.nd.array(t0_np, dev)
buff_t1 = tvm.nd.array(t1_np, dev)
func = tvm.build(
schedule, [placeholder_a0, placeholder_a1, result0, result1], device, name="reduction"
)
func(buff_a0, buff_a1, buff_t0, buff_t1)
t0_np = np.sum(a0_np, axis=1)
t1_np = np.product(a1_np, axis=1)
tvm.testing.assert_allclose(buff_t0.numpy(), t0_np, rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(buff_t1.numpy(), t1_np, rtol=1e-3, atol=1e-3)
check_target("cuda")
check_target("rocm")
@tvm.testing.requires_cuda
def test_reduce_storage_reuse():
"""Test reduction reuses storage."""
target = tvm.target.Target("cuda")
def run_passes(sch, args):
mod = schedule_to_module(sch, args)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", target))(mod)
return tvm.transform.Sequential(
[
tvm.tir.transform.StorageFlatten(64),
tvm.tir.transform.Simplify(),
tvm.tir.transform.StorageRewrite(),
tvm.tir.transform.LowerThreadAllreduce(),
]
)(mod)
dev = tvm.device(target.kind.name, 0)
shape = (16, 16)
placeholder_a = te.placeholder(shape, dtype="float32", name="A")
placeholder_b = topi.nn.softmax(placeholder_a, axis=1) + 1.0
with tvm.target.Target(target):
schedule = topi.cuda.schedule_softmax(placeholder_b)
mod = run_passes(schedule, [placeholder_a, placeholder_b])
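# After StorageRewrite the cross-thread allreduce staging buffer should be reused, so no Store targets "reduce_temp0" directly.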
def check_store_dst_remapped(op):
if isinstance(op, tvm.tir.Store):
assert op.buffer_var.name != "reduce_temp0"
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, check_store_dst_remapped)
inp = np.random.uniform(size=shape).astype("float32")
ref = tvm.topi.testing.softmax_python(inp) + 1.0
func = tvm.build(schedule, [placeholder_a, placeholder_b], target)
buff_a = tvm.nd.array(inp, dev)
buff_b = tvm.nd.array(np.zeros(shape, dtype=placeholder_b.dtype), dev)
func(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), ref, rtol=1e-5)
if __name__ == "__main__":
pytest.main([__file__])
"""Test scheduling and running scan operators."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
@tvm.testing.requires_gpu
def test_scan():
"""Test scan operators."""
size_var_m = te.size_var("m")
size_var_n = te.size_var("n")
placeholder_x = te.placeholder((size_var_m, size_var_n), name="X")
s_state = te.placeholder((size_var_m, size_var_n))
s_init = te.compute((1, size_var_n), lambda _, i: placeholder_x[0, i])
s_update = te.compute(
(size_var_m, size_var_n), lambda t, i: s_state[t - 1, i] + placeholder_x[t, i]
)
scan = tvm.te.scan(s_init, s_update, s_state)
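# The scan computes a cumulative sum over the first axis: state[t, i] = state[t - 1, i] + X[t, i].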
res = te.compute((size_var_m, size_var_n), lambda i, j: scan[i, j])
schedule = te.create_schedule(res.op)
num_thread = 256
block_x = te.thread_axis(None, "blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
axis_xo, axis_xi = schedule[s_init].split(s_init.op.axis[1], factor=num_thread)
schedule[s_init].bind(axis_xo, block_x)
schedule[s_init].bind(axis_xi, thread_x)
axis_xo, axis_xi = schedule[s_update].split(s_update.op.axis[1], factor=num_thread)
schedule[s_update].bind(axis_xo, block_x)
schedule[s_update].bind(axis_xi, thread_x)
axis_xo, axis_xi = schedule[res].split(res.op.axis[1], factor=num_thread)
schedule[res].bind(axis_xo, block_x)
schedule[res].bind(axis_xi, thread_x)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fscan = tvm.build(schedule, [placeholder_x, res], device, name="myscan")
num_n = 1024
num_m = 10
a_np = np.random.uniform(size=(num_m, num_n)).astype(res.dtype)
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(np.zeros((num_m, num_n), dtype=res.dtype), dev)
fscan(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), np.cumsum(a_np, axis=0))
check_device("vulkan")
check_device("cuda")
check_device("metal")
check_device("opencl")
if __name__ == "__main__":
test_scan()
import logging
import tempfile
from typing import List, Optional
import numpy as np
import pytest
import tvm
from tvm import meta_schedule as ms
from tvm import relay
from tvm.contrib import graph_executor
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import generate_input_data
from tvm.target.target import Target
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
@pytest.mark.skip("Integration test")
@pytest.mark.parametrize(
"model_name, input_shape, data_type, target, layout",
[
("resnet_18", [1, 3, 224, 224], "float32", "llvm --num-cores=12", "NHWC"),
("resnet_18", [1, 3, 224, 224], "float32", "nvidia/geforce-rtx-3070", "NHWC"),
],
)
def test_meta_schedule_tune_relay(
model_name: str,
input_shape: List[int],
data_type: str,
target: str,
layout: Optional[str],
):
dev = tvm.cpu() if str(target).startswith("llvm") else tvm.cuda()
data = generate_input_data(input_shape, data_type)
mod, params, (input_name, _, _) = get_network(
name=model_name,
input_shape=input_shape,
layout=layout,
)
target = Target(target)
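# Tune with meta schedule into a database, then compile the tuned module from that database.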
with tempfile.TemporaryDirectory() as work_dir:
with ms.Profiler() as profiler:
database = ms.relay_integration.tune_relay(
mod=mod,
target=target,
params=params,
work_dir=work_dir,
max_trials_global=2048,
)
rt_mod1 = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=target,
params=params,
)
print(profiler.table())
def get_output(data, lib, dev):
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input(input_name, tvm.nd.array(data, device=dev))
module.run()
return module.get_output(0).numpy()
actual_output = get_output(data, rt_mod1, dev)
print(
f"{model_name} finished tuning and running on {Target(target).kind.name}. "
"Running baseline...",
flush=True,
)
baseline_target = "llvm -num-cores=1"
with tvm.transform.PassContext(opt_level=0):
rt_mod2 = relay.build(mod, target=baseline_target, params=params)
expected_output = get_output(data, rt_mod2, tvm.cpu())
print(
f"Basline finished running on {Target(baseline_target).kind.name}. "
"Verifying correctness...",
flush=True,
)
assert np.allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
print(
f"Correctness verified for {model_name} on {Target(target).kind.name}.",
flush=True,
)
if __name__ == """__main__""":
test_meta_schedule_tune_relay(
"resnet_18", [1, 3, 224, 224], "float32", "llvm --num-cores=12", "NHWC"
)
test_meta_schedule_tune_relay(
"resnet_18", [1, 3, 224, 224], "float32", "nvidia/geforce-rtx-3070", None
)
"""Test winograd convolution using nnpack impl."""
import numpy as np
from pytest import skip
import tvm
import tvm.testing
import tvm.topi.testing
from tvm