import tvm
from tvm import te
import tvm.testing
import re
target = "opencl"
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_ternary_expression():
def check_if_then_else(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
true_value = tvm.tir.const(1, dtype=dtype)
false_value = tvm.tir.const(3, dtype=dtype)
max_lhs = tvm.tir.const(2, dtype=dtype)
max_rhs = tvm.tir.if_then_else(A[0] > 0, true_value, false_value)
C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
def check_select(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
true_value = tvm.tir.const(1, dtype=dtype)
false_value = tvm.tir.const(3, dtype=dtype)
max_lhs = tvm.tir.const(2, dtype=dtype)
max_rhs = tvm.tir.Select(A[0] > 0, true_value, false_value)
C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.device(target, 0)
check_if_then_else(dev, 1, "int8")
check_if_then_else(dev, 1, "uint8")
check_if_then_else(dev, 1, "int16")
check_if_then_else(dev, 1, "uint16")
check_select(dev, 1, "int8")
check_select(dev, 1, "uint8")
check_select(dev, 1, "int16")
check_select(dev, 1, "uint16")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_inf_nan():
def check_inf_nan(dev, n, value, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.device(target, 0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float64")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float64")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_max():
def check_max(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
max_lhs = A[0] + tvm.tir.const(1, dtype=dtype)
max_rhs = tvm.tir.const(0, dtype=dtype)
C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.device(target, 0)
check_max(dev, 1, "int8")
check_max(dev, 1, "uint8")
check_max(dev, 1, "int16")
check_max(dev, 1, "uint16")
check_max(dev, 1, "float32")
check_max(dev, 1, "float64")
def test_opencl_erf():
def check_erf(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
C = te.compute(A.shape, lambda *i: te.erf(A(*i)), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
source_str = fun.imported_modules[0].get_source()
matches = re.findall("erf", source_str)
error_matches = re.findall("erff", source_str)
assert len(matches) == 1 and len(error_matches) == 0
dev = tvm.device(target, 0)
check_erf(dev, 1, "float32")
check_erf(dev, 1, "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_type_casting():
def check_type_casting(ctx, n, dtype):
block_size = 4
C = te.compute(
(n,),
lambda i: tvm.tir.Select(
tvm.tir.all(
*[
i // block_size == tvm.tir.const(3, "int32"),
i % block_size == tvm.tir.const(3, "int32"),
]
),
tvm.tir.const(1, dtype),
tvm.tir.const(0, dtype),
),
name="C",
)
s = te.create_schedule(C.op)
(tx, vx) = s[C].split(s[C].op.axis[0], factor=block_size)
s[C].vectorize(vx)
thrx = te.thread_axis("threadIdx.x")
s[C].bind(tx, thrx)
fun = tvm.build(s, [C], target)
c = tvm.nd.empty((n,), dtype, ctx)
assembly = fun.imported_modules[0].get_source()
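# The literals below are the vectorized OpenCL expressions the codegen is
# expected to emit for the Select: float4 true/false branches and a uint4
# condition combining the thread-index check with the vector-lane check.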
false_branch = "((float4)(0.000000e+00f, 0.000000e+00f, 0.000000e+00f, 0.000000e+00f))"
true_branch = "((float4)(1.000000e+00f, 1.000000e+00f, 1.000000e+00f, 1.000000e+00f))"
lcond = "(convert_uint4(((uint4)((((int)get_local_id(0)) == 3), (((int)get_local_id(0)) == 3), (((int)get_local_id(0)) == 3), (((int)get_local_id(0)) == 3)))))"
rcond = "(convert_uint4((((int4)((0)+(1*0), (0)+(1*1), (0)+(1*2), (0)+(1*3))) == ((int4)(3, 3, 3, 3)))))"
cond = "({} && {})".format(lcond, rcond)
select = "select({}, {}, {})".format(false_branch, true_branch, cond)
count = assembly.count(select)
assert count == 1
fun(c)
dev = tvm.device(target, 0)
check_type_casting(dev, 16, "float32")
if __name__ == "__main__":
test_opencl_ternary_expression()
test_opencl_inf_nan()
test_opencl_max()
test_opencl_erf()
test_opencl_type_casting()
import tvm
import tvm.testing
from tvm import te
import numpy as np
import unittest
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
@tvm.testing.requires_rocm
def test_rocm_cross_thread_reduction():
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
s = te.create_schedule(B.op)
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
BF = s.rfactor(B, ki)
xo, xi = s[B].split(s[B].op.axis[0], factor=32)
s[B].bind(xo, bx)
s[B].bind(xi, ty)
s[B].bind(s[B].op.reduce_axis[0], tx)
s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
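# Restrict the final store so that only threadIdx.x == 0 writes the reduced
# value of each row back to B.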
s[B].set_store_predicate(tx.var.equal(0))
frocm = tvm.build(s, [A, B], "rocm")
nn = 128
dev = tvm.rocm(0)
a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
frocm(a, b)
tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4)
@tvm.testing.requires_rocm
def test_rocm_inf_nan():
def check_inf_nan(dev, n, value, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], "rocm")
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.rocm(0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float64")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float64")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float64")
@tvm.testing.requires_rocm
def test_rocm_reduction_binding():
k = te.reduce_axis((0, 32), "k")
A = te.placeholder((96, 32), name="A")
B = te.compute((96,), lambda m: te.sum(A[m, k], axis=k), name="B")
s = te.create_schedule(B.op)
s[B].reorder(B.op.reduce_axis[0], B.op.axis[0])
mo, _ = s[B].split(B.op.axis[0], 32)
s[B].bind(mo, bx)
@tvm.testing.requires_rocm
def test_rocm_copy():
def check_rocm(dtype, n):
A = te.placeholder((n,), name="A", dtype=dtype)
dev = tvm.rocm(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np)
b_np = a.numpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.numpy())
for _ in range(100):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
peturb = np.random.uniform(low=0.5, high=1.5)
check_rocm(dtype, int(peturb * (2**logN)))
@tvm.testing.requires_rocm
def test_rocm_vectorize_add():
num_thread = 8
def check_rocm(dtype, n, lanes):
A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "rocm")
dev = tvm.rocm(0)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_rocm("float32", 64, 2)
check_rocm("float16", 64, 2)
if __name__ == "__main__":
test_rocm_cross_thread_reduction()
test_rocm_inf_nan()
test_rocm_reduction_binding()
test_rocm_copy()
test_rocm_vectorize_add()
import tvm
from tvm import te
import ctypes
import numpy as np
def test_static_callback():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
cp = te.thread_axis((0, 1), "cop")
finit = tvm.tir.StringImm("TVMBackendRunOnce")
ib.scope_attr(cp, "coproc_uop_scope", finit)
with ib.for_range(0, n, "i", kind="parallel") as i:
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
f = tvm.driver.build(mod, target="llvm")
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
f(a)
np.testing.assert_equal(a.numpy(), np.ones(a.shape[0]))
def test_static_init():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
handle = tvm.tir.call_intrin("handle", "tir.tvm_static_handle")
ib.emit(tvm.tir.call_packed("test_static_callback", handle, Ab))
@tvm.register_func("test_static_callback")
def test_cb(sh, A):
assert isinstance(sh, ctypes.c_void_p)
return sh
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
f = tvm.driver.build(mod, target="llvm")
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
if __name__ == "__main__":
test_static_callback()
test_static_init()
import tvm
import tvm.testing
from tvm import te
import numpy as np
def run_jit(fapi, check):
for target in ["llvm", "stackvm"]:
if not tvm.testing.device_enabled(target):
continue
f = tvm.driver.build(fapi, target=target)
s = f.get_source()
check(f)
def test_stack_vm_basic():
a = tvm.nd.array(np.zeros(10, dtype="float32"))
@tvm.register_func
def tvm_call_back_get_shape(shape0):
print(shape0)
assert shape0 == a.shape[0]
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), "float32")
stmt = tvm.tir.Evaluate(tvm.tir.call_packed("tvm_call_back_get_shape", Ab.shape[0]))
mod = tvm.IRModule.from_expr(
tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "print_shape")
)
run_jit(mod, lambda f: f(a))
@tvm.register_func
def tvm_stack_vm_print(*x):
print(x)
def test_stack_vm_loop():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n - 1, "i") as i:
A[i + 1] = A[i] + 1
ib.emit(tvm.tir.call_packed("tvm_stack_vm_print", i))
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
a = tvm.nd.array(np.zeros(10, dtype=dtype))
def check(f):
f(a)
np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
run_jit(mod, check)
def test_stack_vm_cond():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n - 1, "i") as i:
with ib.if_scope(tvm.tir.EQ(i, 4)):
A[i + 1] = A[i] + 1
with ib.else_scope():
A[i + 1] = A[i] + 2
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "test"))
def check(f):
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
y = np.arange(a.s |
hape[0]) * 2
y[5:] -= 1
np.testing.assert_equal(a.numpy(), y)
run_jit(mod, check)
def test_vm_parallel():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n, "i", kind="parallel") as i:
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "test"))
def check(f):
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
np.testing.assert_equal(a.numpy(), np.ones(a.shape[0]))
run_jit(mod, check)
if __name__ == "__main__":
test_vm_parallel()
test_stack_vm_loop()
test_stack_vm_basic()
test_stack_vm_cond()
import os
from posixpath import split
import random
import re
import threading
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.topi.math import cast
dtype = tvm.testing.parameter("float32", "int32", "float16", "int8")
fuzz_seed = tvm.testing.parameter(range(25))
@tvm.testing.parametrize_targets(
" ".join(
[
"vulkan",
"-supports_int8=1",
"-supports_8bit_buffer=1",
"-supports_storage_buffer_storage_class=1",
"-supports_float16=1",
"-supports_16bit_buffer=1",
]
)
)
def test_vector_comparison(target, dtype):
n = (1024,)
A = te.placeholder(n, dtype=dtype, name="A")
B = te.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.tir.const(1, dtype), tvm.tir.const(0, dtype)
),
name="B",
)
s = te.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
def test_array_copy(dev, dtype, fuzz_seed):
np.random.seed(fuzz_seed)
log_arr_size = np.random.uniform(low=np.log(1), high=np.log(32768))
arr_size = np.exp(log_arr_size).astype(int)
a_np = np.random.uniform(size=(arr_size,)).astype(dtype)
a = tvm.nd.empty((arr_size,), dtype, dev).copyfrom(a_np)
b_np = a.numpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.numpy())
@tvm.testing.exclude_targets("llvm")
def test_array_vectorize_add(target, dev, dtype):
arr_size = 64
lanes = 2
if "opencl" in target and dtype == "float16":
pytest.xfail("Opencl target does not support float16")
num_thread = 8
A = te.placeholder((arr_size,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((arr_size,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B], target)
a = tvm.nd.empty((arr_size,), A.dtype, dev).copyfrom(np.random.uniform(size=(arr_size, lanes)))
c = tvm.nd.empty((arr_size,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
@tvm.testing.parametrize_targets("vulkan")
@pytest.mark.skip("Flaky, https:
def test_vulkan_stress(target, dev):
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
n = 1024
num_thread = 64
def run_stress():
def worker():
A = te.placeholder((n,), name="A", dtype="float32")
B = te.placeholder((n,), name="B", dtype="float32")
functions = [
(
lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b,
),
(lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
(lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B, C], target)
return (fun, ref)
fs = [
build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
]
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy()))
ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
@tvm.testing.exclude_targets("llvm")
def test_vulkan_bool_load(target, dev):
arr_size = 1024
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
supports_int8_buffer = target.attrs.get("supports_int8", False) and target.attrs.get(
"supports_8bit_buffer", False
)
if not supports_int8_buffer:
pytest.xfail(
"Vulkan target does not support int8 buffer access, used to transfer booleans"
)
def do_copy(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
max_threads = 32
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < n):
B[tid] = cast(A[tid], "int32")
return ib.get()
A = te.placeholder((arr_size,), name="A", dtype="bool")
B = te.placeholder((arr_size,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_copy(ins[0], outs[0], arr_size),
name="bool_copy_ir",
dtype="int32",
)
s = te.create_schedule(B.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a_np = np.random.uniform(size=arr_size) > 0.5
b_np = np.zeros((arr_size,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
ref = a_np.astype(np.int32)
tvm.testing.assert_allclose(b.numpy(), ref)
def check_mod(target, dev, mod, x_np, res_np):
res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(x_np).numpy()
tvm.testing.assert_allclose(res, res_np, atol=1e-5)
def test_sqrt(target, dev):
dtype = "float32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.sqrt(x))
x_np = np.random.uniform(size=(10,)).astype(dtype)
res_np = np.sqrt(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_argsort(target, dev):
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.argsort(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.argsort(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_cumsum(target, dev):
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.cumsum(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.cumsum(x_np)
check_mod(target, dev, mod, x_np, res_np)
@tvm.testing.skip_if_wheel_test
def test_unique(target, dev):
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
[unique, _, _, num_unique] = relay.unique(x, is_sorted=True)
mod["main"] = relay.Function([x], relay.op.strided_slice(unique, begin=[0], end=num_unique))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.unique(x_np)
check_mod(target, dev, mod, x_np, res_np)
vulkan_parameter_impl = tvm.testing.parameter("push_constants", "ubo")
vulkan_parameter_dtype = tvm.testing.parameter("int32", "float32", "int64")
@tvm.testing.parametrize_targets("vulkan -from_device=0")
def test_vulkan_constant_passing(target, dev, vulkan_parameter_impl, vulkan_parameter_dtype):
target = tvm.target.Target(target)
dtype = vulkan_parameter_dtype
if not target.attrs.get("supports_int64", False):
pytest.xfail("Vulkan target does not support Int64 variables")
if vulkan_parameter_impl == "push_constants":
num_int_params = 1
else:
max_push_constants_size = int(target.attrs.get("max_push_constants_size", 128))
max_int_params_in_push = max_push_constants_size
num_int_params = max_int_params_in_push + 1
n = te.var("n")
scalars = [te.var("scale{}".format(i), dtype=dtype) for i in range(num_int_params)]
scalar_sum = scalars[0]
for s in scalars[1:]:
scalar_sum += s
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f_add = tvm.build(s, scalars + [A, B], target)
n = 1024
scalars = np.array([1 for _ in scalars]).astype(dtype)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f_add(*scalars, a, b)
tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy())
def test_vulkan_while_if(target, dev):
target = tvm.target.Target(target)
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
iterations[0] = 0
B[0] = 0
loop_condition = iterations[0] < tvm.tir.if_then_else(A[0] > 0, 10, 20)
with ib.while_loop(loop_condition):
iterations[0] += 1
B[0] += iterations[0]
return ib.get()
n = 1
dtype = "int32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype=dtype,
)
s = te.create_schedule(B.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a = tvm.nd.array(np.array([5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [55])
a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [210])
@tvm.testing.exclude_targets("llvm")
def test_vulkan_local_threadidx(target, dev):
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
with ib.for_range(0, 1):
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 0] = A[tx + 0]
with ib.for_range(0, 1):
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 16] = A[tx + 16]
return ib.get()
n = te.var("n")
A = te.placeholder((n,), name="A", dtype="int32")
B = te.placeholder((n,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype="int32",
)
s = te.create_schedule(B.op)
func = tvm.build(s, [A, B], target)
n = 32
a_np = np.arange(n).astype(dtype=A.dtype)
b_np = np.zeros((n,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), a_np)
class TestVectorizedIndices:
load_type, store_type = tvm.testing.parameters(
("ramp", "ramp"),
("broadcast", "ramp"),
("broadcast", "broadcast"),
)
indirect_indices = tvm.testing.parameter(True, False, ids=["reorder", "no_reorder"])
@tvm.testing.fixture
def ref_data(self, load_type, store_type, indirect_indices):
n = 4
index_map = {
"ramp": np.arange(n),
"broadcast": np.zeros(n, dtype="int32"),
}
a_np = np.random.randint(np.iinfo("int32").max, size=n).astype("int32")
b_np = np.zeros(shape=n, dtype=a_np.dtype)
reorder_np = np.arange(n, dtype="int32")[::-1]
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = reorder_np[load_index]
b_np[store_index] = a_np[load_index]
return a_np, reorder_np, b_np
@tvm.testing.fixture
def mod(self, target, load_type, store_type, indirect_indices):
target = tvm.target.Target(target)
n = 4
dtype = "int32"
A = te.placeholder((n,), dtype=dtype, name="A")
R = te.placeholder((n,), dtype=dtype, name="R")
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
A, R = map(ib.buffer_ptr, ins)
B = ib.buffer_ptr(outs[0])
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
index_map = {
"ramp": tvm.tir.Ramp(0, 1, 4),
"broadcast": tvm.tir.Broadcast(0, 4),
}
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = R[load_index]
B[store_index] = A[load_index]
return ib.get()
B = te.extern(A.shape, [A, R], do_compute, dtype="int32")
s = te.create_schedule(B.op)
return tvm.lower(s, [A, R, B])
def test_ramp_broadcast_index(self, target, dev, mod, ref_data):
f = tvm.build(mod, target=target)
a_np, reorder_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
r = tvm.nd.array(reorder_np, dev)
b = tvm.nd.array(np.zeros(shape=b_np.shape, dtype="int32"), dev)
f(a, r, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
@tvm.testing.parametrize_targets("vulkan -max_shared_memory_per_block=16384")
def test_shared_mem_alloc(target, dev):
alloc_nbytes = 16384 * 2
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
out = ib.buffer_ptr(outs[0])
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
array = ib.allocate("int32", (alloc_nbytes,), name="array", scope="shared")
array[0] = 0
out[0] = array[0]
return ib.get()
Out = te.extern(
shape=(1,),
inputs=[],
fcompute=do_compute,
dtype="int32",
)
s = te.create_schedule(Out.op)
with pytest.raises(tvm.TVMError):
tvm.build(s, [Out], target)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import platform
import pytest
import re
import textwrap
import tvm
from tvm import te
llvm_version = tvm.target.codegen.llvm_version_major()
machine = platform.machine()
if machine not in ["i386", "x86_64", "AMD64", "amd64"]:
pytest.skip(f"Requires x86_64/i386, but machine is {machine}", allow_module_level=True)
@tvm.testing.requires_llvm
@pytest.mark.skipif(llvm_version < 6, reason=f"Requires LLVM 6+, got {llvm_version}")
def test_fp16_to_fp32():
def fp16_to_fp32(target, width, match=None, not_match=None):
elements = 64
n = tvm.runtime.convert(elements)
A = te.placeholder((n, width), dtype="float16", name="A")
B = te.compute(A.shape, lambda *i: A(*i).astype("float32"), name="B")
s = te.create_schedule(B.op)
s[B].vectorize(s[B].op.axis[1])
f = tvm.build(s, [A, B], target)
assembly = f.get_source("asm").splitlines()
if match:
matches = [l for l in assembly if re.search(match, l)]
assert matches
if not_match:
not_matches = [l for l in assembly if re.search(not_match, l)]
assert not not_matches
fp16_to_fp32("llvm -mcpu=skylake-avx512", 15, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 16, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 17, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 49, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-avx512f", 49, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-f16c,-avx512f", 49, not_match="vcvtph2ps")
fp16_to_fp32("llvm -mcpu=core-avx2", 8, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=core-avx2", 9, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm", 9, not_match="vcvtph2ps")
is_32bit = platform.architecture()[0] == "32bit"
@tvm.testing.requires_llvm
@pytest.mark.skipif(is_32bit, reason=f"Fails in CI due to architecture mismatch in JIT")
@pytest.mark.parametrize("feature_string", ["-sse2", "+sse2"])
def test_fp16_fp32_conversions(feature_string):
relay_model = textwrap.dedent(
"""
def @main(%inp : Tensor[(3), float32], %cst : Tensor[(3), float32]) {
%1 = cast(%inp, dtype="float16");
%2 = cast(%cst, dtype="float16");
%3 = add(%1, %2);
%4 = cast(%3, dtype="float32");
%4
}
"""
)
ir_mod = tvm.parser.fromtext(relay_model)
arch = "i386" if machine == "i386" else "x86_64"
aot_factory = tvm.relay.build(
ir_mod,
params={"cst": np.array([1.0, 2.0, 3.0], dtype="float32")},
target=f"llvm --mtriple={arch} --mattr={feature_string}",
executor=tvm.relay.backend.Executor(
"aot", {"interface-api": "packed", "unpacked-api": False}
),
)
mod_name = aot_factory["list_module_names"]()[0]
executor = aot_factory[mod_name]
mod = executor(tvm.cpu(0))
inp = tvm.nd.array(np.array([1.1, 2.1, 3.1], dtype="float32"), device=tvm.cpu(0))
mod.get_function("set_input")(0, inp)
mod.get_function("run")()
out = mod.get_function("get_output")(0)
expected = np.array([2.1, 4.1, 6.1], dtype="float32")
np.testing.assert_allclose(out.asnumpy(), expected, rtol=1e-3)
if __name__ == "__main__":
test_fp16_to_fp32()
"""
Tests to verify Python interactions with Target Parsing
"""
import pytest
import tvm.testing
from tvm.target import Target
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m55")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert parsed_target.features.has_dsp
assert parsed_target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile_no_mve(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m7")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert parsed_target.features.has_dsp
assert not parsed_target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile_no_dsp(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m3")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert not parsed_target.features.has_dsp
assert not parsed_target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["llvm"]])
def test_target_parser_mprofile_mattr(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m55 -mattr=+nomve,+woof")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert parsed_target.features.has_dsp
assert not parsed_target.features.has_mve
if __name__ == "__main__":
tvm.testing.main()
import json
import pytest
import tvm
import tvm.testing
from tvm.target import Target, arm_cpu, bifrost, cuda, intel_graphics, mali, rocm, vta
@tvm.target.generic_func
def mygeneric(data):
return data + 1
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
return data + 2
@mygeneric.register("rocm")
def rocm_func(data):
return data + 3
@mygeneric.register("cpu")
def cpu_func(data):
return data + 10
def test_all_targets_device_type_verify():
"""Consistency verification for all targets' device type"""
all_targets = [tvm.target.Target(t) for t in tvm.target.Target.list_kinds()]
for tgt in all_targets:
relay_to_tir = tgt.get_kind_attr("RelayToTIR")
tir_to_runtime = tgt.get_kind_attr("TIRToRuntime")
is_external_codegen = tgt.get_kind_attr("is_external_codegen")
if relay_to_tir is not None or tir_to_runtime is not None or is_external_codegen:
continue
if tgt.kind.name not in tvm._ffi.runtime_ctypes.Device.STR2MASK:
raise KeyError("Cannot find target kind: %s in Device.STR2MASK" % tgt.kind.name)
assert (
tgt.get_target_device_type() == tvm._ffi.runtime_ctypes.Device.STR2MASK[tgt.kind.name]
)
def test_target_dispatch():
with tvm.target.cuda():
assert mygeneric(1) == 3
assert mygeneric.get_packed_func()(1) == 3
with tvm.target.rocm():
assert mygeneric(1) == 4
assert mygeneric.get_packed_func()(1) == 4
with tvm.target.Target("cuda"):
assert mygeneric(1) == 3
assert mygeneric.get_packed_func()(1) == 3
with tvm.target.arm_cpu():
assert mygeneric(1) == 11
assert mygeneric.get_packed_func()(1) == 11
with tvm.target.Target("metal"):
assert mygeneric(1) == 3
assert mygeneric.get_packed_func()(1) == 3
assert tvm.target.Target.current() is None
@tvm.target.override_native_generic_func("test_target_temp_strategy")
def target_generic(data):
return data + 1
@target_generic.register(["cuda", "gpu"])
def target_cuda_func(data):
return data + 2
def temp_target_cuda_func(data):
return data + 3
def test_target_temp_strategy():
class TempStrategy(object):
def __init__(self, name, target, fstrategy):
generic_fstrategy = tvm.target.get_native_generic_func(name)
self.target = target
self.name = name
self.origin_func = {}
with tvm.target.Target(target) as target_obj:
for tgt_key in target_obj.keys:
self.origin_func[tgt_key] = generic_fstrategy.get_packed_func()
generic_fstrategy.register(fstrategy, tgt_key, allow_override=True)
def __enter__(self):
return self
def __exit__(self, typ, value, traceback):
generic_fstrategy = tvm.target.get_native_generic_func(self.name)
with tvm.target.Target(self.target) as target_obj:
for tgt_key in target_obj.keys:
generic_fstrategy.register(
self.origin_func[tgt_key], tgt_key, allow_override=True
)
with tvm.target.Target("cuda"):
assert target_generic(1) == 3
with TempStrategy("test_target_temp_strategy", "cuda", temp_target_cuda_func):
with tvm.target.Target("cuda"):
assert target_generic(1) == 4
with tvm.target.Target("cuda"):
assert target_generic(1) == 3
def test_target_string_parse():
target = tvm.target.Target("cuda -model=unknown -libs=cublas,cudnn")
assert target.kind.name == "cuda"
assert target.model == "unknown"
assert set(target.keys) == set(["cuda", "gpu"])
assert set(target.libs) == set(["cublas", "cudnn"])
assert str(target) == str(tvm.target.cuda(options="-libs=cublas,cudnn"))
assert tvm.target.intel_graphics().device_name == "intel_graphics"
assert tvm.target.mali().device_name == "mali"
assert tvm.target.arm_cpu().device_name == "arm_cpu"
def test_target_string_with_spaces():
target = tvm.target.Target(
"vulkan -device_name='Name of GPU with spaces' -device_type=discrete"
)
assert target.attrs["device_name"] == "Name of GPU with spaces"
assert target.attrs["device_type"] == "discrete"
target = tvm.target.Target(str(target))
assert target.attrs["device_name"] == "Name of GPU with spaces"
assert target.attrs["device_type"] == "discrete"
def test_target_llvm_options():
target = tvm.target.Target("llvm -cl-opt='-unroll-threshold:uint=100,-unroll-count:uint=3'")
assert sorted(target.attrs["cl-opt"]) == sorted(
["-unroll-threshold:uint=100", "-unroll-count:uint=3"]
)
def test_target_create():
targets = [cuda(), rocm(), mali(), intel_graphics(), arm_cpu("rk3399"), vta(), bifrost()]
for tgt in targets:
assert tgt is not None
def test_target_config():
"""
Test that constructing a target from a dictionary works.
"""
target_config = {
"kind": "llvm",
"keys": ["arm_cpu", "cpu"],
"device": "arm_cpu",
"libs": ["cblas"],
"mfloat-abi": "hard",
"mattr": ["+neon", "-avx512f"],
}
target_config_str = json.dumps(target_config)
for config in [target_config, target_config_str]:
target = tvm.target.Target(config)
assert target.kind.name == "llvm"
assert all([key in target.keys for key in ["arm_cpu", "cpu"]])
assert target.device_name == "arm_cpu"
assert target.libs == ["cblas"]
assert target.attrs["mfloat-abi"] == "hard"
assert all([attr in target.attrs["mattr"] for attr in ["+neon", "-avx512f"]])
def test_config_map():
"""
Confirm that constructing a target with invalid
attributes fails as expected.
"""
target_config = {"kind": "llvm", "libs": {"a": "b", "c": "d"}}
with pytest.raises(ValueError):
tvm.target.Target(target_config)
def test_composite_target():
tgt = tvm.target.Target("composite --host=llvm --devices=cuda,opencl")
assert tgt.kind.name == "composite"
assert tgt.host.kind.name == "llvm"
assert len(tgt.attrs["devices"]) == 2
cuda_device, opencl_device = tgt.attrs["devices"]
assert cuda_device.kind.name == "cuda"
assert opencl_device.kind.name == "opencl"
def test_target_tag_0():
tgt = tvm.target.Target("nvidia/geforce-rtx-2080-ti")
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_75"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 65536
def test_target_tag_1():
tgt = tvm.target.Target("nvidia/jetson-nano")
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_53"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 32768
def test_list_kinds():
targets = tvm.target.Target.list_kinds()
assert len(targets) != 0
assert "llvm" in targets
assert all(isinstance(target_name, str) for target_name in targets)
def test_target_host_tags():
tgt = tvm.target.Target("nvidia/jetson-nano", "nvidia/geforce-rtx-2080-ti")
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_53"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 32768
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_75"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 65536
def test_target_host_tag_dict():
tgt = tvm.target.Target("nvidia/jetson-nano", {"kind": "llvm"})
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_53"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 32768
assert tgt.host.kind.name == "llvm"
def test_target_host_single_dict():
tgt = tvm.target.Target({"kind": "llvm", "host": "nvidia/jetson-nano"})
assert tgt.kind.name == "llvm"
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_target_host_single_string():
tgt = tvm.target.Target("cuda --host llvm")
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
def test_target_host_single_string_with_tag():
tgt = tvm.target.Target("cuda --host nvidia/jetson-nano")
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_target_host_merge_0():
tgt = tvm.target.Target(tvm.target.Target("cuda --host nvidia/jetson-nano"), None)
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_target_host_merge_1():
tgt = tvm.target.Target("cuda --host llvm")
tgt = tvm.target.Target(tgt, tgt.host)
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
def test_target_host_merge_2():
"""Test picking the same host is ok."""
tgt = tvm.target.Target(tvm.target.Target("cuda --host llvm"), tvm.target.Target("llvm"))
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
def test_target_tvm_object():
"""Test creating Target by using TVM Objects"""
String = tvm.runtime.container.String
tgt = tvm.target.Target(target=String("cuda --host llvm"))
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
tgt = tvm.target.Target(target=String("cuda"), host=String("llvm"))
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
@pytest.mark.skip(reason="Causing infinite loop because of pytest and handle issue")
def test_target_host_merge_3():
with pytest.raises(ValueError, match=r"target host has to be a string or dictionary."):
tvm.target.Target(tvm.target.Target("cuda --host llvm"), 12.34)
def test_target_with_host():
tgt = tvm.target.Target("cuda")
llvm = tvm.target.Target("llvm")
tgt = tgt.with_host(llvm)
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
cuda_host = tvm.target.Target("nvidia/jetson-nano")
tgt = tgt.with_host(cuda_host)
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_canon_target_and_host_0():
target = None
host = None
target, host = Target.canon_target_and_host(target, host)
assert target is None
assert host is None
def test_canon_target_and_host_1():
target = None
host = "llvm"
with pytest.raises(AssertionError, match=r"Target host is not empty when target is empty."):
target, host = Target.canon_target_and_host(target, host)
def test_canon_target_and_host_2():
target = Target("cuda")
host = Target("llvm")
target, host = Target.canon_target_and_host(target, host)
assert target.kind.name == "cuda"
assert target.host.kind.name == "llvm"
def test_canon_target_and_host_3():
target = Target(target="cuda", host="llvm")
host = None
target, host = Target.canon_target_and_host(target, host)
assert target.kind.name == "cuda"
assert target.host.kind.name == "llvm"
assert host.kind.name == "llvm"
assert target.host == host
def test_canon_multi_target_and_host_0():
with pytest.raises(AssertionError):
Target.canon_multi_target_and_host(None)
def test_canon_multi_target_and_host_1():
raw_targets = Target.canon_multi_target_and_host({"kind": "llvm"})
assert len(raw_targets) == 1
assert raw_targets[0].kind.name == "llvm"
def test_canon_multi_target_and_host_2():
raw_targets = Target.canon_multi_target_and_host({1: "llvm", 2: "cuda"})
assert len(raw_targets) == 2
assert raw_targets[0].kind.name == "llvm"
assert raw_targets[1].kind.name == "cuda"
def test_canon_multi_target_and_host_3():
raw_targets = Target.canon_multi_target_and_host(["llvm", "cuda"])
assert len(raw_targets) == 2
assert raw_targets[0].kind.name == "llvm"
assert raw_targets[1].kind.name == "cuda"
def test_canon_multi_target_and_host_4():
raw_targets = Target.canon_multi_target_and_host("llvm")
assert len(raw_targets) == 1
assert raw_targets[0].kind.name == "llvm"
def test_canon_multi_target_and_host_5():
raw_targets = Target.canon_multi_target_and_host("cuda", "llvm")
assert len(raw_targets) == 1
assert raw_targets[0].kind.name == "cuda"
assert raw_targets[0].host.kind.name == "llvm"
def test_canon_multi_target_and_host_6():
"""Test `canon_target_and_host` by using TVM Objects"""
cuda_device_type = tvm.device("cuda").device_type
target = {cuda_device_type: Target(target="cuda", host="llvm")}
host = None
raw_targets_1 = Target.canon_multi_target_and_host(target, host)
assert len(raw_targets_1) == 1
assert raw_targets_1[0].kind.name == "cuda"
assert raw_targets_1[0].host.kind.name == "llvm"
target = {cuda_device_type: Target(tvm.runtime.container.String("cuda"))}
host = Target(tvm.runtime.container.String("llvm"))
target = tvm.runtime.convert(target)
assert isinstance(target, tvm.ir.container.Map)
raw_targets_2 = Target.canon_multi_target_and_host(target, host)
assert len(raw_targets_2) == 1
assert raw_targets_2[0].kind.name == "cuda"
assert raw_targets_2[0].host.kind.name == "llvm"
def test_canon_target_map_and_host():
target_map = {"cuda": "cuda_module", "llvm": "cpu_module"}
target_map, host = Target.canon_target_map_and_host(target_map, "llvm")
assert host.kind.name == "llvm"
for t, v in target_map.items():
assert t.host.kind.name == "llvm"
if t.kind.name == "cuda":
assert v == "cuda_module"
elif t.kind.name == "llvm":
assert v == "cpu_module"
else:
assert False
def test_target_attr_bool_value():
target0 = Target("vulkan --supports_float16=True")
assert target0.attrs["supports_float16"] == 1
target1 = Target("vulkan --supports_float16=true")
assert target1.attrs["supports_float16"] == 1
target2 = Target("vulkan --supports_float16=False")
assert target2.attrs["supports_float16"] == 0
target3 = Target("vulkan --supports_float16=false")
assert target3.attrs["supports_float16"] == 0
def test_target_features():
target_no_features = Target("cuda")
assert target_no_features.features
assert not target_no_features.features.is_test
target_with_features = Target("test")
assert target_with_features.features.is_test
assert not target_with_features.features.is_missing
if __name__ == "__main__":
tvm.testing.main()
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import autotvm
from tvm import te
from tvm.topi import testing
from tvm.topi.utils import get_const_tuple, simplify
from tvm.topi import nn
def compute_plus_one_rank3(shape):
X = te.placeholder(shape, name="X", dtype="float32")
Y = te.compute(shape, lambda i, j, k: X[i, j, k] + 1, name="Compute_Y")
return X, Y
def schedule_plus_one_rank3(X, Y):
s = te.create_schedule(Y.op)
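# Stage the input through the "global.texture" scope (OpenCL image memory);
# the innermost axis is vectorized so it maps onto the texel vector lanes.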
Xt = s.cache_read(X, "global.texture", [Y])
x, y, c = s[Xt].op.axis
s[Xt].bind(x, te.thread_axis("blockIdx.x"))
s[Xt].bind(y, te.thread_axis("threadIdx.x"))
s[Xt].vectorize(c)
x, y, c = s[Y].op.axis
xo, yo, xi, yi = s[Y].tile(x, y, 4, 4)
s[Y].bind(xo, te.thread_axis("blockIdx.x"))
s[Y].bind(yo, te.thread_axis("threadIdx.x"))
s[Y].vectorize(c)
return s
def compute_plus_one_rank5(shape):
X = te.placeholder(shape, name="X", dtype="float32")
Y = te.compute(shape, lambda i, j, k, l, m: X[i, j, k, l, m] + 1, name="Compute_Y")
return X, Y
def schedule_plus_one_rank5(X, Y):
s = te.create_schedule(Y.op)
Xt = s.cache_read(X, "global.texture", [Y])
a, b, c, d, e = s[Xt].op.axis
abc = s[Xt].fuse(a, b, c)
s[Xt].bind(abc, te.thread_axis("blockIdx.x"))
s[Xt].bind(d, te.thread_axis("threadIdx.x"))
s[Xt].vectorize(e)
a, b, c, d, e = s[Y].op.axis
abc = s[Y].fuse(a, b, c)
xo, yo, xi, yi = s[Y].tile(abc, d, 4, 4)
s[Y].bind(xo, te.thread_axis("blockIdx.x"))
s[Y].bind(yo, te.thread_axis("threadIdx.x"))
s[Y].vectorize(e)
return s
def compute_matmul(shape):
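# Packed layout implied by the indexing below: A and B are (M // block, K, block)
# tensors; C is the unpacked (M, M) product, with i // shape[2] and i % shape[2]
# recovering the chunk and block indices.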
A = te.placeholder(shape, name="A", dtype="float32")
B = te.placeholder(shape, name="B", dtype="float32")
k = te.reduce_axis((0, shape[1]), name="k")
C = te.compute(
(shape[0] * shape[2], shape[0] * shape[2]),
lambda i, j: te.sum(
A[i // shape[2], k, i % shape[2]].astype("float32")
* B[j // shape[2], k, j % shape[2]].astype("float32"),
axis=[k],
),
name="Compute_MatMul",
)
return A, B, C
def schedule_matmul(A, B, C, local=False):
s = te.create_schedule(C.op)
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
if local:
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
def copy_to_texture(stage):
_io, _k, _ii = s[stage].op.axis
s[stage].vectorize(_ii)
s[stage].bind(_io, bx)
s[stage].bind(_k, tx)
copy_to_texture(At)
copy_to_texture(Bt)
_i, _j = s[C].op.axis
xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
s[C].unroll(xi)
s[C].vectorize(yi)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(yo, te.thread_axis("threadIdx.x"))
s[Cl].compute_at(s[C], yo)
(_k,) = Cl.op.reduce_axis
_x, _y = s[Cl].op.axis
s[Cl].reorder(_k, _x, _y)
s[Cl].unroll(_x)
s[Cl].vectorize(_y)
if local:
s[Al].compute_at(s[Cl], _k)
s[Al].vectorize(s[Al].op.axis[-1])
s[Bl].compute_at(s[Cl], _k)
s[Bl].vectorize(s[Bl].op.axis[-1])
return s
def compute_matmul_inner(shape):
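# Variant with the reduction axis packed innermost: A and B are assumed to be
# (M, K // block, block) tensors, giving an (M, M) output.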
A = te.placeholder(shape, name="A", dtype="float32")
B = te.placeholder(shape, name="B", dtype="float32")
k = te.reduce_axis((0, shape[1] * shape[2]), name="k")
C = te.compute(
(shape[0], shape[0]),
lambda i, j: te.sum(
A[i, k // shape[2], k % shape[2]].astype("float32")
* B[j, k // shape[2], k % shape[2]].astype("float32"),
axis=[k],
),
name="Compute_MatMul",
)
return A, B, C
def schedule_matmul_inner(A, B, C, local=False):
s = te.create_schedule(C.op)
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
if local:
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
def copy_to_texture(stage):
_i, _ko, _ki = s[stage].op.axis
s[stage].vectorize(_ki)
s[stage].bind(_i, bx)
s[stage].bind(_ko, tx)
copy_to_texture(At)
copy_to_texture(Bt)
_i, _j = s[C].op.axis
xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
s[C].unroll(xi)
s[C].vectorize(yi)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(yo, te.thread_axis("threadIdx.x"))
s[Cl].compute_at(s[C], yo)
(_k,) = Cl.op.reduce_axis
_x, _y = s[Cl].op.axis
s[Cl].reorder(_x, _y, _k)
s[Cl].unroll(_x)
if local:
s[Al].compute_at(s[Cl], _x)
s[Al].vectorize(s[Al].op.axis[-1])
s[Bl].compute_at(s[Cl], _x)
s[Bl].vectorize(s[Bl].op.axis[-1])
return s
def compute_matmul_vector_accumulator(shapeA, shapeB):
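# Assumed layouts, matching the indexing below: A is (K // block, M, block),
# B is (K, N // block, block), and C is the (M, N) product.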
A = te.placeholder(shapeA, name="A", dtype="float32")
B = te.placeholder(shapeB, name="B", dtype="float32")
k = te.reduce_axis((0, shapeB[0]), name="k")
C = te.compute(
(shapeA[1], shapeB[1] * shapeB[2]),
lambda i, j: te.sum(
A[k // shapeA[2], i, k % shapeA[2]].astype("float32")
* B[k, j // shapeB[2], j % shapeB[2]].astype("float32"),
axis=[k],
),
name="Compute_MatMul",
)
return A, B, C
def schedule_matmul_vector_accumulator(A, B, C, local=False):
s = te.create_schedule(C.op)
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
if local:
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
def copy_to_texture(stage):
_y, _x, _v = s[stage].op.axis
s[stage].vectorize(_v)
s[stage].bind(_y, te.thread_axis("blockIdx.x"))
s[stage].bind(_x, te.thread_axis("threadIdx.x"))
copy_to_texture(At)
copy_to_texture(Bt)
_i, _j = s[C].op.axis
xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
s[C].unroll(xi)
s[C].vectorize(yi)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(yo, te.thread_axis("threadIdx.x"))
s[Cl].compute_at(s[C], yo)
(_k,) = Cl.op.reduce_axis
_a, _b = s[Cl].op.axis
_ko, _ki = s[Cl].split(_k, factor=4)
s[Cl].reorder(_ko, _a, _ki, _b)
s[Cl].unroll(_ki)
s[Cl].unroll(_a)
s[Cl].vectorize(_b)
if local:
s[Al].compute_at(s[Cl], _a)
_aa, _ka, _ba = s[Al].op.axis
s[Al].vectorize(_ba)
s[Bl].compute_at(s[Cl], _ko)
_ab, _kb, _bb = s[Bl].op.axis
s[Bl].vectorize(_bb)
s[Bl].unroll(_ab)
return s
def compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
c = te.reduce_axis((0, input_shape[1]), name="C")
c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
kh = te.reduce_axis((0, filter_shape[0]), name="kh")
kw = te.reduce_axis((0, filter_shape[1]), name="kw")
conv = te.compute(
(input_shape[0], filter_shape[-2], input_shape[2], input_shape[3], filter_shape[-1]),
lambda n, ko, i, j, ki: te.sum(
data[n, c, i, j, c4].astype("float32")
* filt[kh, kw, c * input_shape[-1] + c4, ko, ki].astype("float32"),
axis=[kh, kw, c, c4],
),
name="conv2d_1x1",
)
return data, filt, conv
def schedule_conv2d_1x1_NCHWc_RSCKk(data, filt, conv):
s = te.create_schedule(conv.op)
A, B, C = data, filt, conv
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(At)
copy_to_texture(Bt)
_n, _ko, _h, _w, _ki = s[C].op.axis
s[C].vectorize(_ki)
s[C].bind(_n, te.thread_axis("blockIdx.x"))
s[C].bind(_ko, te.thread_axis("threadIdx.x"))
s[Cl].compute_at(s[C], _w)
_nl, _kol, _hl, _wl, _kil = s[Cl].op.axis
_khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
_clo, _cli = s[Cl].split(_cl, factor=4)
s[Cl].reorder(_clo, _cli, _cl4, _kil)
s[Cl].unroll(_cli)
s[Cl].unroll(_cl4)
s[Cl].vectorize(_kil)
s[Al].compute_at(s[Cl], _cli)
s[Al].vectorize(s[Al].op.axis[-1])
s[Bl].compute_at(s[Cl], _kwl)
s[Bl].vectorize(s[Bl].op.axis[-1])
return s
def compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
packed_data = te.compute(
(input_shape[0], input_shape[1], input_shape[2] * input_shape[3], input_shape[4]),
lambda i, j, k, l: data[i, j, k // input_shape[3], k % input_shape[3], l],
name="packed_data",
)
packed_filter = te.compute(
(filter_shape[0], filter_shape[1] * filter_shape[2] * filter_shape[3], filter_shape[4]),
lambda i, j, k: filt[
i,
j // (filter_shape[2] * filter_shape[3]),
(j // filter_shape[3]) % filter_shape[2],
j % filter_shape[3],
k,
],
name="packed_filter",
)
c = te.reduce_axis((0, input_shape[1]), name="C")
c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
r = te.reduce_axis((0, filter_shape[1]), name="r")
s = te.reduce_axis((0, filter_shape[2]), name="s")
conv = te.compute(
(input_shape[0], filter_shape[3], input_shape[2], input_shape[3], filter_shape[4]),
lambda w, ko, h, n, ki: te.sum(
packed_data[w, c, h * input_shape[3] + n, c4].astype("float32")
* packed_filter[
c * input_shape[-1] + c4, ((r * filter_shape[2]) + s) * filter_shape[3] + ko, ki
].astype("float32"),
axis=[r, s, c, c4],
),
name="conv2d_1x1",
)
return data, filt, packed_data, packed_filter, conv
def schedule_conv2d_1x1_WCHNc_CRSKk(data, filt, packed_data, packed_filter, conv):
s = te.create_schedule(conv.op)
cfg = autotvm.get_config()
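# This schedule is an AutoTVM template: the tile_* splits and the
# auto_unroll_max_step knob defined below are tunable parameters.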
s[packed_data].compute_inline()
s[packed_filter].compute_inline()
A, B, C = packed_data, packed_filter, conv
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(At)
copy_to_texture(Bt)
_w, _ko, _h, _n, _ki = s[C].op.axis
kernel_scope, _n = s[C].split(_n, nparts=1)
cfg.define_split("tile_f", _ko, num_outputs=4)
cfg.define_split("tile_w", _w, num_outputs=4)
cfg.define_split("tile_h", _h, num_outputs=4)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
bk, vk, tk, ki = cfg["tile_f"].apply(s, C, _ko)
bw, vw, tw, wi = cfg["tile_w"].apply(s, C, _w)
bh, vh, th, hi = cfg["tile_h"].apply(s, C, _h)
s[C].reorder(bh, _n, vh, th, hi)
bhn = s[C].fuse(bh, _n)
s[C].bind(bk, te.thread_axis("blockIdx.z"))
s[C].bind(bhn, te.thread_axis("blockIdx.y"))
s[C].bind(bw, te.thread_axis("blockIdx.x"))
s[C].bind(vk, te.thread_axis("vthread"))
s[C].bind(vh, te.thread_axis("vthread"))
s[C].bind(vw, te.thread_axis("vthread"))
s[C].bind(tk, te.thread_axis("threadIdx.z"))
s[C].bind(th, te.thread_axis("threadIdx.y"))
s[C].bind(tw, te.thread_axis("threadIdx.x"))
s[C].reorder(bw, bk, bhn, vw, vk, vh, tw, tk, th, ki, hi, wi, _ki)
s[C].vectorize(_ki)
s[Cl].compute_at(s[C], th)
_wl, _kol, _hl, _nl, _kil = s[Cl].op.axis
_khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
cfg.define_split("tile_c", _cl, num_outputs=2) |
cfg.define_split("tile_kh", _khl, num_outputs=2)
cfg.define_split("tile_kw", _kwl, num_outputs=2)
_clo, _cli = cfg["tile_c"].apply(s, Cl, _cl)
_khlo, _khli = cfg["tile_kh"].apply(s, Cl, _khl)
_kwlo, _kwli = cfg["tile_kw"].apply(s, Cl, _kwl)
s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli, _kol, _hl, _nl, _kil, _wl)
s[Cl].unroll(_cl4)
s[Cl].unroll(_wl)
s[Cl].vectorize(_kil)
_wla, _cla, _hnla, _cl4a = s[Al].op.axis
s[Al].compute_at(s[Cl], _cli)
s[Al].vectorize(_cl4a)
s[Al].unroll(_wla)
_clb, _rskolb, _kilb = s[Bl].op.axis
s[Bl].compute_at(s[Cl], _cli)
s[Bl].vectorize(_kilb)
s[Bl].unroll(_clb)
s[C].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
WO, K, HO, N, K4 = get_const_tuple(C.shape)
RSC, _, _ = get_const_tuple(B.shape)
cfg.add_flop(2 * N * K * K4 * HO * WO * RSC)
return s
def compute_conv2d_NCHWc_KCRSk(Input, Filter, stride, padding, dilation, out_dtype=None):
"""Convolution operator in NCHWc layout."""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
rcc = te.reduce_axis((0, in_channel_chunk), name="rc")
rcb = te.reduce_axis((0, in_channel_block), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
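# Flatten the (C, R, S) filter dimensions into a single crs axis; the original
# channel / row / column indices are recovered with integer div/mod below.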
Filter = te.compute(
(num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
lambda ffc, crs, ffb: Filter[
ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
],
name="packed_filter",
)
return te.compute(
(batch, num_filter_chunk, out_height, out_width, num_filter_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
temp[
nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb
].astype(out_dtype)
* Filter[
ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb
].astype(out_dtype),
axis=[rcc, rcb, ry, rx],
),
tag="conv2d_nchwc_kcrsk_texture",
)
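# Schedule for compute_conv2d_NCHWc_KCRSk: inputs are read through texture and
# shared-memory caches, and the accumulation runs in a vectorized local stage.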
def schedule_conv2d_NCHWc_KCRSk(cfg, s, conv):
"""schedule optimized for batch size = 1"""
n, fc, y, x, fb = s[conv].op.axis
rcc, rcb, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rcc", rcc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
pad_data, flattened_kernel = s[conv].op.input_tensors
kernel = s[flattened_kernel].op.input_tensors[0]
s[flattened_kernel].compute_inline()
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
kernel = flattened_kernel
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
AT = s.cache_read(pad_data, "global.texture", [OL])
WT = s.cache_read(kernel, "global.texture", [OL])
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(AT)
copy_to_texture(WT)
AA = s.cache_read(AT, "shared", [OL])
WW = s.cache_read(WT, "shared", [OL])
n, fc, y, x, fb = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
s[output].vectorize(fb)
s[OL].compute_at(s[output], tx)
n, fc, y, x, fb = s[OL].op.axis
rcc, rcb, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
s[OL].vectorize(fb)
s[OL].unroll(rcb)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
for load in [AA, WW]:
if load == WW:
n, fyx, v = s[load].op.axis
fused = s[load].fuse(n, fyx)
else:
n, f, y, x, v = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(v)
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
_, ICKHKW, _ = get_const_tuple(kernel.shape)
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
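# Same convolution as above, but the accumulation stays in out_dtype and a final
# stage casts the result back to float32.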
def compute_conv2d_NCHWc_KCRSk_acc32(Input, Filter, stride, padding, dilation, out_dtype=None):
"""Convolution operator in NCHWc layout."""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
rcc = te.reduce_axis((0, in_channel_chunk), name="rc")
rcb = te.reduce_axis((0, in_channel_block), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
Filter = te.compute(
(num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
lambda ffc, crs, ffb: Filter[
ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
],
name="packed_filter",
)
conv = te.compute(
(batch, num_filter_chunk, out_height, out_width, num_filter_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
(
temp[nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]
* Filter[ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb]
).astype(out_dtype),
axis=[rcc, rcb, ry, rx],
),
tag="conv2d_nchwc_kcrsk_texture",
)
output = te.compute(conv.shape, lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype("float32"))
return output
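# Schedule for the fp32-accumulation variant; structurally the same as
# schedule_conv2d_NCHWc_KCRSk, with the trailing cast stage as the output.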
def schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
"""schedule optimized for batch size = 1"""
conv = output.op.input_tensors[0]
n, fc, y, x, fb = s[conv].op.axis
rcc, rcb, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rcc", rcc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
pad_data, flattened_kernel = s[conv].op.input_tensors
kernel = s[flattened_kernel].op.input_tensors[0]
s[flattened_kernel].compute_inline()
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
kernel = flattened_kernel
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
AT = s.cache_read(pad_data, "global.texture", [OL])
WT = s.cache_read(kernel, "global.texture", [OL])
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(AT)
copy_to_texture(WT)
AA = s.cache_read(AT, "shared", [OL])
WW = s.cache_read(WT, "shared", [OL])
n, fc, y, x, fb = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
s[output].vectorize(fb)
s[OL].compute_at(s[output], tx)
n, fc, y, x, fb = s[OL].op.axis
rcc, rcb, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
s[OL].vectorize(fb)
s[OL].unroll(rcb)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
for load in [AA, WW]:
if load == WW:
n, fyx, v = s[load].op.axis
fused = s[load].fuse(n, fyx)
else:
n, f, y, x, v = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(v)
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
_, ICKHKW, _ = get_const_tuple(kernel.shape)
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
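# Depthwise conv2d in NCHWc layout: each output channel reduces only over the
# spatial kernel window (ry, rx), not over input channels.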
def compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
Input, Filter, stride, padding, dilation, out_dtype=None
):
"""Depthwise convolution operator in NCHWc layout."""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, channel_chunk, in_height, in_width, channel_block = Input.shape
_, channel_multiplier, kernel_h, kernel_w, _ = Filter.shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channel_chunk = simplify(channel_chunk * channel_multiplier)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
Filter = te.compute(
(channel_chunk, channel_multiplier * kernel_h * kernel_w, channel_block),
lambda ffc, mrs, ffb: Filter[
ffc, mrs // (kernel_h * kernel_w), (mrs // kernel_w) % kernel_h, mrs % kernel_w, ffb
],
name="packed_filter",
)
conv = te.compute(
(batch, out_channel_chunk, out_height, out_width, channel_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
(
temp[
nn,
ffc // channel_multiplier,
yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w,
ffb,
]
* Filter[
ffc // channel_multiplier,
((ffc % channel_multiplier) * kernel_h + ry) * kernel_w + rx,
ffb,
]
).astype(out_dtype),
axis=[ry, rx],
),
tag="depthwise_conv2d_nchwc_kcrsk_texture",
)
return te.compute(
conv.shape, lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype("float32")
)
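# Schedule for the depthwise variant; mirrors the conv2d schedule above but only
# tiles the ry/rx reduction axes.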
def schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
"""schedule optimized for batch size = 1"""
conv = output.op.input_tensors[0]
n, fc, y, x, fb = s[conv].op.axis
ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_r |
y", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
pad_data, flattened_kernel = s[conv].op.input_tensors
kernel = s[flattened_kernel].op.input_tensors[0]
s[flattened_kernel].compute_inline()
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
kernel = flattened_kernel
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
AT = s.cache_read(pad_data, "global.texture", [OL])
WT = s.cache_read(kernel, "global.texture", [OL])
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(AT)
copy_to_texture(WT)
AA = s.cache_read(AT, "shared", [OL])
WW = s.cache_read(WT, "shared", [OL])
n, fc, y, x, fb = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
s[output].vectorize(fb)
s[OL].compute_at(s[output], tx)
n, fc, y, x, fb = s[OL].op.axis
ry, rx = s[OL].op.reduce_axis
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
s[OL].vectorize(fb)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
for load in [AA, WW]:
if load == WW:
n, fyx, v = s[load].op.axis
fused = s[load].fuse(n, fyx)
else:
n, f, y, x, v = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(v)
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
ICC, MKHKW, ICB = get_const_tuple(kernel.shape)
M = (OCC * OCB) // (ICC * ICB)  # channel multiplier
KHKW = MKHKW // M  # kernel_h * kernel_w
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)
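# Helpers that pair a compute definition with its schedule and return
# (schedule, placeholders) tuples for the validator below.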
def scheduler(compute, schedule, *args, **kwargs):
placeholders = compute(*args)
s = schedule(*placeholders, **kwargs)
return s, placeholders
def conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
placeholders = compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape)
s = schedule_conv2d_1x1_NCHWc_RSCKk(*placeholders)
return s, placeholders
def conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
placeholders = compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape)
s = schedule_conv2d_1x1_WCHNc_CRSKk(*placeholders)
return s, (placeholders[0], placeholders[1], placeholders[-1])
def conv2d_NCHWc_KCRSk(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
conv = compute_conv2d_NCHWc_KCRSk(data, filt, [1, 1], [0, 0], [1, 1], "float32")
cfg = autotvm.get_config()
s = te.create_schedule([x.op for x in [conv]])
schedule_conv2d_NCHWc_KCRSk(cfg, s, conv)
return s, (data, filt, conv)
def conv2d_NCHWc_KCRSk_fp32_acc(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
output = compute_conv2d_NCHWc_KCRSk_acc32(data, filt, [1, 1], [0, 0], [1, 1], "float32")
cfg = autotvm.get_config()
s = te.create_schedule([x.op for x in [output]])
schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output)
return s, (data, filt, output)
def depthwise_conv2d_NCHWc_KCRSk_acc32(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
output = compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
data, filt, [1, 1], [0, 0], [1, 1], "float32"
)
cfg = autotvm.get_config()
s = te.create_schedule([x.op for x in [output]])
schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output)
return s, (data, filt, output)
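# Reference results are computed with MXNet's Convolution operator; mxnet is
# imported lazily so it is only required when these workloads run.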
def ref_convolution(data, kernel, stride, pad):
import mxnet as mx
groups = 1
kernel_size = (kernel.shape[2], kernel.shape[3])
num_filter = kernel.shape[0]
ref_res = mx.nd.Convolution(
data=mx.nd.array(data),
weight=mx.nd.array(kernel),
bias=None,
no_bias=True,
kernel=kernel_size,
stride=stride,
pad=pad,
num_filter=num_filter,
num_group=groups,
)
return ref_res.asnumpy()
def ref_depthwise_convolution(data, kernel, stride, pad):
import mxnet as mx
groups = kernel.shape[0]
kernel_size = (kernel.shape[2], kernel.shape[3])
num_filter = kernel.shape[0]
multiplier = kernel.shape[1]
ref_res = mx.nd.Convolution(
data=mx.nd.array(data),
weight=mx.nd.array(kernel),
bias=None,
no_bias=True,
kernel=kernel_size,
stride=stride,
pad=pad,
num_filter=num_filter,
num_group=groups,
)
return ref_res.asnumpy()
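# Build and run a workload on the target device with random inputs, then undo the
# packed layouts and compare against a NumPy/MXNet reference.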
def validate(workload, target, dev, input_shapes, *args, **kwargs):
s, placeholders = workload(*input_shapes, *args, **kwargs)
func = tvm.driver.build(s, [*placeholders], target=target, name="TestFunction")
args_tvm = []
args_np = []
for var in placeholders[:-1]:
var_np = np.random.uniform(size=[i.value for i in var.shape]).astype(var.dtype)
args_np.append(var_np)
args_tvm.append(tvm.nd.array(var_np, dev))
args_tvm.append(
tvm.nd.array(
np.zeros([i.value for i in placeholders[-1].shape], dtype=placeholders[-1].dtype), dev
)
)
func(*args_tvm)
if "plus_one" in workload.__name__:
np_result = args_np[0] + 1.0
elif "matmul" in workload.__name__:
if "inner" in workload.__name__:
np_result = np.matmul(
args_np[0].reshape(32, 256), args_np[1].reshape(32, 256).transpose(1, 0)
)
elif "accum" in workload.__name__:
np_result = np.matmul(
args_np[0].transpose((1, 0, 2)).reshape(64, 128), args_np[1].reshape(128, 64)
)
else:
np_result = np.matmul(
args_np[0].transpose((0, 2, 1)).reshape(128, 64),
args_np[1].transpose(1, 0, 2).reshape(64, 128),
)
elif "conv2d_1x1_NCHWc_RSCKk" in workload.__name__:
vec_length = args_np[1].shape[-1]
args_np[0] = (
args_np[0]
.transpose((0, 1, 4, 2, 3))
.reshape(
args_np[0].shape[0],
args_np[0].shape[1] * args_np[0].shape[-1],
args_np[0].shape[2],
args_np[0].shape[3],
)
)
args_np[1] = (
args_np[1]
.reshape(
args_np[1].shape[0],
args_np[1].shape[1],
args_np[1].shape[2],
args_np[1].shape[3] * args_np[1].shape[4],
)
.transpose((3, 2, 0, 1))
)
np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
np_result = np_result.reshape(
np_result.shape[0],
np_result.shape[1] // vec_length,
vec_length,
np_result.shape[2],
np_result.shape[3],
).transpose(0, 1, 3, 4, 2)
elif "conv2d_1x1_WCHNc_CRSKk" in workload.__name__:
vec_length = args_np[1].shape[-1]
args_np[0] = (
args_np[0]
.transpose((3, 1, 4, 2, 0))
.reshape(
args_np[0].shape[3],
args_np[0].shape[1] * args_np[0].shape[-1],
args_np[0].shape[2],
args_np[0].shape[0],
)
)
args_np[1] = (
args_np[1]
.reshape(
args_np[1].shape[0],
args_np[1].shape[1],
args_np[1].shape[2],
args_np[1].shape[3] * args_np[1].shape[4],
)
.transpose((3, 0, 1, 2))
)
np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
np_result = np_result.reshape(
np_result.shape[0],
np_result.shape[1] // vec_length,
vec_length,
np_result.shape[2],
np_result.shape[3],
).transpose(4, 1, 3, 0, 2)
elif "NCHW_KCRS" in workload.__name__:
np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
elif "NCHWc_KCRSk" in workload.__name__:
vec_length = args_np[1].shape[-1]
args_np[0] = (
args_np[0]
.transpose((0, 1, 4, 2, 3))
.reshape(
args_np[0].shape[0],
args_np[0].shape[1] * args_np[0].shape[-1],
args_np[0].shape[2],
args_np[0].shape[3],
)
)
args_np[1] = (
args_np[1]
.transpose((0, 4, 1, 2, 3))
.reshape(
args_np[1].shape[0] * args_np[1].shape[4],
args_np[1].shape[1],
args_np[1].shape[2],
args_np[1].shape[3],
)
)
if "depthwise" in workload.__name__:
np_result = ref_depthwise_convolution(args_np[0], args_np[1], [], [])
else:
np_result = ref_convolution(args_np[0], args_np[1], [], [])
np_result = np_result.reshape(
np_result.shape[0],
np_result.shape[1] // vec_length,
vec_length,
np_result.shape[2],
np_result.shape[3],
).transpose(0, 1, 3, 4, 2)
np.testing.assert_allclose(args_tvm[-1].asnumpy(), np_result, rtol=1e-2, atol=1e-2)
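# Parametrized pytest classes: each picks input shapes and a workload builder;
# the base validator classes run them on the OpenCL target.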
class BaseSingleShapeValidator:
@tvm.testing.parametrize_targets("opencl")
def test_unary(self, test_func, input_shape, target, dev):
validate(test_func, target, dev, [input_shape])
class TestPlusOneRank3(BaseSingleShapeValidator):
input_shape = tvm.testing.parameter((32, 32, 4))
def plus_one(input_shape):
return scheduler(compute_plus_one_rank3, schedule_plus_one_rank3, input_shape)
test_func = tvm.testing.parameter(plus_one)
class TestPlusOneRank5(BaseSingleShapeValidator):
input_shape = tvm.testing.parameter((32, 2, 4, 4, 4))
def plus_one(input_shape):
return scheduler(compute_plus_one_rank5, schedule_plus_one_rank5, input_shape)
test_func = tvm.testing.parameter(plus_one)
class TestMatmul:
input_shape = tvm.testing.parameter((32, 64, 4))
local = tvm.testing.parameter(False, True)
def matmul(input_shape, local):
return scheduler(compute_matmul, schedule_matmul, input_shape, local=local)
def matmul_inner(input_shape, local):
return scheduler(compute_matmul_inner, schedule_matmul_inner, input_shape, local=local)
test_func = tvm.testing.parameter(matmul, matmul_inner)
@tvm.testing.parametrize_targets("opencl")
def test_matmul(self, test_func, input_shape, local, target, dev):
validate(test_func, target, dev, [input_shape], local=local)
class TestMatmulVectorAccumulator:
shapeA = tvm.testing.parameter((32, 64, 4))
shapeB = tvm.testing.parameter((128, 16, 4))
local = tvm.testing.parameter(False, True)
def matmul_vector_accumulator(shapeA, shapeB, local):
return scheduler(
compute_matmul_vector_accumulator,
schedule_matmul_vector_accumulator,
shapeA,
shapeB,
local=local,
)
test_func = tvm.testing.parameter(matmul_vector_accumulator)
@tvm.testing.parametrize_targets("opencl")
def test_matmul_vec_acc(self, test_func, shapeA, shapeB, local, target, dev):
validate(test_func, target, dev, [shapeA, shapeB], local=local)
class BaseConv2DValidator:
@tvm.testing.parametrize_targets("opencl")
def test_conv2d(self, test_func, input_shapes, target, dev):
validate(test_func, target, dev, input_shapes)
class TestConv2dNCHWcRSCKk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter([(1, 32, 56, 56, 4), (1, 1, 128, 32, 4)])
test_func = tvm.testing.parameter(conv2d_1x1_NCHWc_RSCKk)
class TestConv2dWCHNcCRSKk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter([(56, 32, 56, 1, 4), (128, 1, 1, 32, 4)])
test_func = tvm.testing.parameter(conv2d_1x1_WCHNc_CRSKk)
class TestConv2dNCHWcKCRSk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter(
[(1, 32, 56, 56, 4), (32, 128, 1, 1, 4)], [(1, 32, 112, 112, 4), (32, 128, 3, 3, 4)]
)
test_func = tvm.testing.parameter(conv2d_NCHWc_KCRSk, conv2d_NCHWc_KCRSk_fp32_acc)
class TestDepthwiseConv2dNCHWcKCRSk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter([(1, 24, 257, 257, 4), (24, 1, 3, 3, 4)])
test_func = tvm.testing.parameter(depthwise_conv2d_NCHWc_KCRSk_acc32)
if __name__ == "__main__":
tvm.testing.main()