t32")))
check_c()
def test_floor():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.floor", A(*i)), name="B")
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_floor")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
ffloor = m["test_floor"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
ffloor(a, b)
tvm.testing.assert_allclose(b.numpy(), (np.floor(a.numpy()).view("float32")))
check_c()
def test_round():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.round", A(*i)), name="B")
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_round")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fround = m["test_round"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
fround(a, b)
tvm.testing.assert_allclose(b.numpy(), (np.round(a.numpy()).view("float32")))
check_c()
def test_call_packed():
def fake_func(fname="fake.func"):
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
fake_func1 = tvm.tir.call_packed(fname, A[0])
ib.emit(fake_func1)
body = ib.get()
return A, body
def check_global_packed_func():
fname = "fake.func"
A, body = fake_func(fname)
func1 = tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "func1")
B, body = fake_func()
func2 = tvm.tir.PrimFunc([B], body).with_attr("global_symbol", "func2")
mod = tvm.IRModule({"fake_func1": func1, "fake_func2": func2})
fcode = tvm.build(mod, None, "c")
src = fcode.get_source()
assert src.count(fname) == 2
suffix = "_packed"
packed_func_name = fname + suffix
assert src.find(packed_func_name) == -1
packed_func_real_name = "_".join(fname.split(".")) + suffix
func_declaration = "static void* %s = NULL;" % packed_func_real_name
assert src.count(func_declaration) == 1
check_global_packed_func()
if __name__ == "__main__":
test_add()
test_add_pipeline()
test_reinterpret()
test_ceil()
test_floor()
test_round()
test_call_packed()
"""Test cross compilation"""
import tvm
import tvm.testing
from tvm import te
import os
import struct
from tvm import rpc
from tvm.contrib import utils, cc
import numpy as np
@tvm.testing.requires_llvm
def test_llvm_add_pipeline():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
def verify_elf(path, e_machine):
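# ELF header layout: bytes 1-3 hold the "ELF" magic, byte 0x5 encodes endianness
# (1 = little, 2 = big), and the 16-bit field at offset 0x12 is e_machine
# (0x03 = EM_386, 0x28 = EM_ARM).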
with open(path, "rb") as fi:
arr = fi.read(20)
assert struct.unpack("ccc", arr[1:4]) == (b"E", b"L", b"F")
endian = struct.unpack("b", arr[0x5:0x6])[0]
endian = "<" if endian == 1 else ">"
assert struct.unpack(endian + "h", arr[0x12:0x14])[0] == e_machine
def build_i386():
temp = utils.tempdir()
target = "llvm -mtriple=i386-pc-linux-gnu"
f = tvm.build(s, [A, B, C], target)
path = temp.relpath("myadd.o")
f.save(path)
verify_elf(path, 0x03)
def build_arm():
target = "llvm -mtriple=armv7-none-linux-gnueabihf"
if not tvm.runtime.enabled(target):
print("Skip because %s is not enabled.." % target)
return
temp = utils.tempdir()
f = tvm.build(s, [A, B, C], target)
path = temp.relpath("myadd.o")
f.save(path)
verify_elf(path, 0x28)
asm_path = temp.relpath("myadd.asm")
f.save(asm_path)
host = os.environ.get("TVM_RPC_ARM_HOST", None)
remote = None
if host:
port = int(os.environ["TVM_RPC_ARM_PORT"])
try:
remote = rpc.connect(host, port)
except tvm.error.TVMError as e:
pass
if remote:
remote.upload(path)
farm = remote.load_module("myadd.o")
dev = remote.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
farm(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
print("Verification finish on remote..")
build_i386()
build_arm()
if __name__ == "__main__":
test_llvm_add_pipeline()
import re
import tvm
from tvm import te
import numpy as np
from tvm import topi
from tvm.contrib.nvcc import have_fp16, have_int8, have_bf16
import tvm.testing
import pytest
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_add():
num_thread = 8
def check_cuda(dtype, n, lanes):
if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
if dtype == "int8" and not have_int8(tvm.cuda(0).compute_version):
print("skip because gpu does not support int8")
return
A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_cuda("float32", 64, 2)
check_cuda("float32", 64, 3)
check_cuda("float32", 64, 4)
check_cuda("int8", 64, 2)
check_cuda("int8", 64, 3)
check_cuda("int8", 64, 4)
check_cuda("uint8", 64, 2)
check_cuda("uint8", 64, 3)
check_cuda("uint8", 64, 4)
check_cuda("float16", 64, 2)
check_cuda("float16", 64, 4)
check_cuda("float16", 64, 6)
check_cuda("float16", 64, 8)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_bf16_vectorize_add():
if not have_bf16(tvm.cuda(0).compute_version):
print("skip because gpu does not support bf16")
return
num_thread = 8
def np_float2np_bf16(arr):
"""Convert a numpy array of float to a numpy array
of bf16 in uint16"""
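# Round-to-nearest-even: add 0x7FFF plus the lowest kept mantissa bit, then drop
# the low 16 bits of the float32 representation.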
orig = arr.view("<u4")
bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
return np.right_shift(orig + bias, 16).astype("uint16")
def np_bf162np_float(arr):
"""Convert a numpy array of bf16 (uint16) to a numpy array
of float"""
u32 = np.left_shift(arr.astype("uint32"), 16)
return u32.view("<f4")
def check_cuda(n, lanes):
A = te.placeholder((n,), name="A", dtype="bfloat16x%d" % lanes)
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
with tvm.transform.PassContext(
disabled_pass=["tir.BF16Promote", "tir.BF16CastElimination", "tir.BF16TypeLowering"]
):
fun = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
np_a = np.random.uniform(size=(n, lanes)).astype("float32")
np_a = np_bf162np_float(np_float2np_bf16(np_a))
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_float2np_bf16(np_a))
c = tvm.nd.empty((n,), B.dtype, dev)
fun(a, c)
c = tvm.nd.empty((n, lanes), "uint16", dev).copyfrom(c)
tvm.testing.assert_allclose(c.numpy(), np_float2np_bf16(np_a + 1))
check_cuda(64, 2)
check_cuda(64, 4)
check_cuda(64, 6)
check_cuda(64, 8)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_multiply_add():
num_thread = 8
def check_cuda(dtype, n, lanes):
if dtype == "int8" and not have_int8(tvm.cuda(0).compute_version):
print("skip because gpu does not support int8")
return
A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.placeholder((n,), name="B", dtype="%sx%d" % (dtype, lanes))
C = te.placeholder((n,), name="C", dtype="int32")
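# __dp4a is a CUDA intrinsic: a 4-way int8 dot product of A[i] and B[i]
# accumulated into the int32 value C[i].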
D = te.compute(
(n,), lambda i: tvm.tir.call_pure_extern("int32", "__dp4a", A[i], B[i], C[i]), name="D"
)
s = te.create_schedule(D.op)
xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
s[D].bind(xo, bx)
s[D].bind(xi, tx)
fun = tvm.build(s, [A, B, C, D], "cuda")
np_a = np.random.randint(low=-128, high=127, size=(n, lanes))
np_b = np.random.randint(low=-128, high=127, size=(n, lanes))
np_c = np.random.randint(low=0, high=127, size=(n,))
np_d = [sum(x * y) + z for x, y, z in zip(np_a, np_b, np_c)]
dev = tvm.cuda(0)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_a)
b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np_b)
c = tvm.nd.empty((n,), C.dtype, dev).copyfrom(np_c)
d = tvm.nd.empty((n,), D.dtype, dev)
fun(a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), np_d)
check_cuda("int8", 64, 4)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_load():
num_thread = 8
def check_cuda(dtype, n, lanes):
dev = tvm.cuda(0)
A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i], name="B")
s = te.create_schedule(B.op)
block, thread = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(block, bx)
s[B].bind(thread, tx)
fun = tvm.build(s, [A, B], "cuda", name="vector_load")
np_a = np.random.randint(low=-128, high=127, size=(n, lanes))
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_a)
b = tvm.nd.empty((n,), B.dtype, dev)
fun(a, b)
tvm.testing.assert_allclose(a.numpy(), b.numpy())
check_cuda("int8", 64, 2)
check_cuda("int8", 64, 3)
check_cuda("int8", 64, 4)
check_cuda("int8", 64, 8)
check_cuda("int8", 64, 16)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_make_int8():
def check_cuda(n, value, lanes):
dtype = "int8"
dev = tvm.cuda(0)
A = te.compute((n, lanes), lambda i, j: tvm.tir.const(value, dtype=dtype))
s = te.create_schedule(A.op)
y, x = s[A].op.axis
s[A].vectorize(x)
s[A].bind(y, bx)
fun = tvm.build(s, [A], "cuda", name="make_int8x4")
np_a = np.full((n, lanes), value, dtype=dtype)
a = tvm.nd.empty(np_a.shape, dtype, dev)
fun(a)
np.testing.assert_equal(a.numpy(), np_a)
check_cuda(64, np.int8(0xAB), 4)
check_cuda(64, 0, 4)
check_cuda(64, -3, 4)
check_cuda(64, np.int8(0xAB), 3)
check_cuda(64, 0, 3)
check_cuda(64, -3, 3)
check_cuda(64, np.int8(0xAB), 2)
check_cuda(64, 0, 2)
check_cuda(64, -3, 2)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_make_int4():
def check_cuda(n, value, lanes):
dtype = "int4"
dev = tvm.cuda(0)
A = te.compute((n, lanes), lambda i, j: tvm.tir.const(value, dtype=dtype))
s = te.create_schedule(A.op)
y, x = s[A].op.axis
s[A].vectorize(x)
s[A].bind(y, bx)
kernel_name = "make_int4x" + str(lanes)
fun = tvm.build(s, [A], "cuda", name=kernel_name)
np_a = np.full((n, lanes), value, dtype="int8")
a = tvm.nd.empty((n, lanes), dtype, dev)
fun(a)
np.testing.assert_equal(a.numpy(), np_a)
check_cuda(64, 1, 4)
check_cuda(64, 7, 4)
check_cuda(64, 1, 8)
check_cuda(64, 7, 8)
check_cuda(64, 1, 16)
check_cuda(64, 7, 16)
check_cuda(64, 1, 32)
check_cuda(64, 7, 32)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_inf_nan():
target = "cuda"
def check_inf_nan(dev, n, value, dtype):
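# Only checks that kernels containing inf/nan constants build and launch;
# no numerical result is asserted.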
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.device(target, 0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float64")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float64")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_shuffle():
idxm = tvm.tir.indexmod
a = te.placeholder((64,), "int32")
b = te.placeholder((64,), "int32")
c = te.compute((64,), lambda x: a[x] + b[x - idxm(x, 4) + (3 - idxm(x, 4))])
sch = te.create_schedule(c.op)
x = c.op.axis[0]
xo, xi = sch[c].split(x, 4)
thrx = te.thread_axis("threadIdx.x")
sch[c].bind(xo, thrx)
sch[c].vectorize(xi)
def MyVectorize():
def vectorizer(op):
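# Rewrite the vectorized loop: A is loaded with a Ramp index, while B is rebuilt
# from four scalar loads combined by a lane-reversing Shuffle.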
if op.kind == tvm.tir.ForKind.VECTORIZED:
idx = tvm.tir.Ramp(4 * thrx.var, 1, 4)
store = op.body
value = store.value
new_a = tvm.tir.BufferLoad(value.a.buffer, [idx])
bs, ids = [], []
for i in range(4):
bs.append(tvm.tir.BufferLoad(value.b.buffer, [4 * thrx.var + i]))
ids.append(3 - i)
new_b = tvm.tir.Shuffle(bs, ids)
return tvm.tir.BufferStore(store.buffer, new_a + new_b, [idx])
return None
def _transform(f, *_):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ["tir.For"])
)
return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="MyVectorize")
with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, MyVectorize())]}):
module = tvm.build(sch, [a, b, c], target="cuda")
a_ = np.array(list(range(64)), dtype="int32")
b_ = np.array((list(range(4))[::-1]) * 16, dtype="int32")
c_ = np.zeros((64,), dtype="int32")
ref = a_ + np.array((list(range(4))) * 16, dtype="int32")
nda, ndb, ndc = [tvm.nd.array(i, tvm.cuda(0)) for i in [a_, b_, c_]]
module(nda, ndb, ndc)
tvm.testing.assert_allclose(ndc.numpy(), ref)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_crossthread_reduction1(target, dev):
n = te.var("n")
m = te.var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "m")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
def sched(nthd):
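# Splitting the reduce axis into nthd parts and binding the outer part to
# threadIdx.x yields a cross-thread reduction.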
s = te.create_schedule(B.op)
ko, _ = s[B].split(B.op.reduce_axis[0], nparts=nthd)
s[B].bind(ko, te.thread_axis("threadIdx.x"))
s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
func = tvm.build(s, [A, B], target)
return func
def verify(nthd):
func = sched(nthd)
nn = 3
vals = [nthd - 1, nthd, nthd + 1]
for kk in [x for x in vals]:
size = (nn, kk)
a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-3)
verify(16)
verify(32)
verify(64)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_crossthread_reduction2(target, dev):
n = te.var("n")
k0 = te.var("k0")
k1 = te.var("k1")
A = te.placeholder((n, k0, k1), name="A")
k0 = te.reduce_axis((0, k0), "k0")
k1 = te.reduce_axis((0, k1), "k1")
B = te.compute((n,), lambda i: te.sum(A[i, k0, k1], axis=(k0, k1)), name="B")
def sched(nthdx, nthdy):
s = te.create_schedule(B.op)
k0o, _ = s[B].split(B.op.reduce_axis[0], nparts=nthdx)
k1o, _ = s[B].split(B.op.reduce_axis[1], nparts=nthdy)
s[B].bind(k0o, te.thread_axis("threadIdx.x"))
s[B].bind(k1o, te.thread_axis("threadIdx.y"))
s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
func = tvm.build(s, [A, B], target)
return func
def verify(nthdx, nthdy):
func = sched(nthdx, nthdy)
nn = 3
vx = [nthdx - 1, nthdx, nthdx + 1]
vy = [nthdy - 1, nthdy, nthdy + 1]
for kk0, kk1 in [(x, y) for x in vx for y in vy]:
size = (nn, kk0, kk1)
a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=(1, 2)), rtol=1e-3)
verify(16, 16)
verify(32, 32)
verify(16, 32)
verify(32, 16)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_reduction_binding():
k = te.reduce_axis((0, 32), "k")
A = te.placeholder((96, 32), name="A")
B = te.compute((96,), lambda m: te.sum(A[m, k], axis=k), name="B")
s = te.create_schedule(B.op)
s[B].reorder(B.op.reduce_axis[0], B.op.axis[0])
mo, _ = s[B].split(B.op.axis[0], 32)
s[B].bind(mo, te.thread_axis("blockIdx.x"))
fcuda = tvm.build(s, [A, B], "cuda")
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_rfactor_predicates(target, dev):
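# The reduction extent 129 is not divisible by the split factors, so the
# rfactored stages require predicated stores.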
n = te.reduce_axis((0, 129), "n")
A = te.placeholder((129,), name="A")
B = te.compute((1,), lambda b: te.sum(A[n], axis=n), name="B")
s = te.create_schedule(B.op)
_, ni = s[B].split(s[B].op.reduce_axis[0], factor=8)
BF = s.rfactor(B, ni, 0)
s[B].set_store_predicate(tx.var.equal(0))
s[B].bind(s[B].op.reduce_axis[0], tx)
s[B].bind(s[B].op.axis[0], bx)
s[BF].compute_at(s[B], s[B].op.axis[0])
_, noi = s[BF].split(s[BF].op.reduce_axis[0], factor=2)
BF2 = s.rfactor(BF, noi, 0)
s[BF].bind(s[BF].op.axis[0], tx)
s[BF2].compute_at(s[BF], s[BF].op.axis[1])
fcuda = tvm.build(s, [A, B], target)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_const_float_to_half():
from tvm import autotvm
shape = (2, 3, 4)
a = te.placeholder(shape, dtype="float16", name="a")
b = tvm.tir.const(0.5, dtype="float16")
c = te.compute(shape, lambda i, j, k: a[i, j, k] > b, name="c")
s = te.create_schedule(c.op)
axes = [axis for axis in c.op.axis]
fused = s[c].fuse(*axes)
bx, tx = s[c].split(fused, factor=64)
s[c].bind(bx, te.thread_axis("blockIdx.x"))
s[c].bind(tx, te.thread_axis("threadIdx.x"))
func = tvm.build(s, [a, c], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=shape).astype(a.dtype)
c_np = np.zeros(shape=shape, dtype=c.dtype)
a = tvm.nd.array(a_np, dev)
c = tvm.nd.array(c_np, dev)
func(a, c)
np.testing.assert_equal(c.numpy(), a_np > b.value)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_reduction():
def check(device, dtype, m=32, n=32):
if not tvm.testing.device_enabled(device):
print("Skipping", device)
return
dev = tvm.device(device, 0)
a = te.placeholder((m, n), name="a", dtype=dtype)
b = te.placeholder((m, n), name="b", dtype=dtype)
c = a + b
d = a * b
e = topi.elemwise_sum([c, d])
g = topi.sum(e)
with tvm.target.Target(device):
sg = topi.cuda.schedule_reduce(g)
func = tvm.build(sg, [a, b, g], device)
a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
b_np = np.random.uniform(size=(m, n)).astype(b.dtype)
g_np = np.sum(np.add(a_np * b_np, a_np + b_np))
a_nd = tvm.nd.array(a_np, dev)
b_nd = tvm.nd.array(b_np, dev)
g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), dev)
func(a_nd, b_nd, g_nd)
tvm.testing.assert_allclose(g_nd.numpy(), g_np, rtol=1e-3)
check("cuda", "float32")
check("rocm", "float32")
check("cuda", "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_mix_threaded_and_normal_reduction():
def check(device, dtype, m=32, n=32):
if not tvm.testing.device_enabled(device):
print("Skipping", device)
return
dev = tvm.device(device, 0)
if dtype == "float16" and not have_fp16(dev.compute_version):
print("Skip because gpu does not have fp16 support")
return
a = tvm.te.placeholder((m, n), name="a", dtype=dtype)
b = topi.sum(a)
with tvm.target.Target(device):
sb = tvm.te.create_schedule(b.op)
i, _ = b.op.reduce_axis
sb[b].bind(i, tvm.te.thread_axis("threadIdx.x"))
func = tvm.build(sb, [a, b], device)
a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
b_np = np.sum(a_np)
a_nd = tvm.nd.array(a_np, dev)
b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev)
func(a_nd, b_nd)
tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3)
check("cuda", "float32")
check("rocm", "float32")
check("cuda", "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_floordiv_with_vectorization():
with tvm.target.cuda():
n = 256
k = 37
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: A[tvm.tir.floordiv(i, k)], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], nparts=1)
xio, xii = s[B].split(xi, factor=4)
s[B].vectorize(xii)
s[B].bind(xo, bx)
s[B].bind(xio, tx)
func = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
b_np = np.array([a_np[i // k] for i in range(0, n)])
a_nd = tvm.nd.array(a_np, dev)
b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev)
func(a_nd, b_nd)
tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_floormod_with_vectorization():
with tvm.target.cuda():
n = 256
k = 37
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: A[tvm.tir.floormod(i, k)], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], nparts=1)
xio, xii = s[B].split(xi, factor=4)
s[B].vectorize(xii)
s[B].bind(xo, bx)
s[B].bind(xio, tx)
func = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
b_np = np.array([a_np[i % k] for i in range(0, n)])
a_nd = tvm.nd.array(a_np, dev)
b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev)
func(a_nd, b_nd)
tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_casts():
def check(t0, t1, factor):
if (t0 == "float16" or t1 == "float16") and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
n = 128
A = te.placeholder((n,), dtype=t0, name="A")
B = te.placeholder((n,), dtype=t1, name="B")
C = te.compute((n,), lambda i: A[i] + topi.cast(B[i], A.dtype), name="C")
s = tvm.te.create_schedule(C.op)
ob, ib = s[C].split(s[C].op.axis[0], factor=factor)
s[C].vectorize(ib)
s[C].bind(ob, tx)
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
low, high = (0, 20) if t0.startswith("u") or t1.startswith("u") else (-10, 10)
a_np = np.random.randint(low, high, size=n).astype(A.dtype)
b_np = np.random.randint(low, high, size=n).astype(B.dtype)
c_np = (a_np + b_np).astype(A.dtype)
a_nd = tvm.nd.array(a_np, dev)
b_nd = tvm.nd.array(b_np, dev)
c_nd = tvm.nd.array(np.zeros(c_np.shape, dtype=c_np.dtype), dev)
func(a_nd, b_nd, c_nd)
tvm.testing.assert_allclose(c_nd.numpy(), c_np, rtol=1e-3)
def skip(t0, t1):
if t0 == t1:
return True
skip_set = {"float16", "uint8", "int8"}
if t0 in skip_set and t1 in skip_set:
return True
return False
types_4 = [
"float16",
"float32",
"int8",
"uint8",
"int16",
"uint16",
"int32",
"uint32",
"float64",
"int64",
"uint64",
]
types_8 = ["float16", "float32", "int8", "uint8", "int16", "uint16", "int32", "uint32"]
for t0, t1 in [(x, y) for x in types_4 for y in types_4 if not skip(x, y)]:
check(t0, t1, 4)
for t0, t1 in [(x, y) for x in types_8 for y in types_8 if not skip(x, y)]:
check(t0, t1, 8)
check("int8", "uint8", 16)
check("uint8", "int8", 16)
def sched(B):
s = te.create_schedule(B.op)
io, ii = s[B].split(s[B].op.axis[0], nparts=1)
iio, iii = s[B].split(ii, nparts=32)
_, iiii = s[B].split(iii, factor=4)
s[B].vectorize(iiii)
s[B].bind(io, bx)
s[B].bind(iio, tx)
return s
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_intrin1():
test_funcs = [
(tvm.tir.floor, lambda x: np.floor(x)),
(tvm.tir.ceil, lambda x: np.ceil(x)),
(tvm.tir.trunc, lambda x: np.trunc(x)),
(tvm.tir.abs, lambda x: np.fabs(x)),
(tvm.tir.round, lambda x: np.round(x)),
(tvm.tir.exp, lambda x: np.exp(x)),
(tvm.tir.exp2, lambda x: np.exp2(x)),
(tvm.tir.exp10, lambda x: np.power(10, x)),
(tvm.tir.log, lambda x: np.log(x)),
(tvm.tir.log2, lambda x: np.log2(x)),
(tvm.tir.log10, lambda x: np.log10(x)),
(tvm.tir.tan, lambda x: np.tan(x)),
(tvm.tir.cos, lambda x: np.cos(x)),
(tvm.tir.cosh, lambda x: np.cosh(x)),
(tvm.tir.sin, lambda x: np.sin(x)),
(tvm.tir.sinh, lambda x: np.sinh(x)),
(tvm.tir.atan, lambda x: np.arctan(x)),
(tvm.tir.tanh, lambda x: np.tanh(x)),
(tvm.tir.sqrt, lambda x: np.sqrt(x)),
]
def run_test(tvm_intrin, np_func, dtype):
if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
skip_set = {
tvm.tir.abs,
tvm.tir.round,
tvm.tir.tan,
tvm.tir.atan,
tvm.tir.tanh,
tvm.tir.cosh,
tvm.tir.sinh,
}
if dtype == "float16" and tvm_intrin in skip_set:
print("Skip because '{0}' does not support fp16 yet".format(tvm_intrin.__name__))
return
n = 128
A = te.placeholder((n,), dtype=dtype, name="A")
B = te.compute((n,), lambda *i: tvm_intrin(A(*i)), name="B")
s = sched(B)
f = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-3, rtol=1e-3)
for func in test_funcs:
run_test(*func, "float32")
run_test(*func, "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_intrin2(dtype="float32"):
c2 = tvm.tir.const(2, dtype=dtype)
test_funcs = [
(tvm.tir.power, lambda x: np.power(x, 2.0)),
(tvm.tir.fmod, lambda x: np.fmod(x, 2.0)),
]
def run_test(tvm_intrin, np_func):
n = 128
A = te.placeholder((n,), dtype=dtype, name="A")
B = te.compute((n,), lambda i: tvm_intrin(A[i], c2), name="B")
s = sched(B)
f = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-3, rtol=1e-3)
for func in test_funcs:
run_test(*func)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_popcount():
def ref_popcount(x):
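# Reference popcount: clear the lowest set bit each iteration (Kernighan's method).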
cnt = 0
while x:
x -= x & -x
cnt += 1
return cnt
def run_test(dtype):
n = 128
A = te.placeholder((n,), dtype=dtype, name="A")
B = te.compute((n,), lambda i: tvm.tir.popcount(A[i]), name="B")
s = sched(B)
f = tvm.build(s, [A, B], "cuda")
dev = tvm.cuda(0)
a = tvm.nd.array(np.random.randint(0, 100000, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(shape=(n,)).astype(B.dtype), dev)
f(a, b)
ref = np.vectorize(ref_popcount)(a.numpy())
tvm.testing.assert_allclose(b.numpy(), ref)
run_test("uint32")
run_test("uint64")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_load_permute_pad():
def check_cuda(dtype, n, l, padding, lanes):
if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
dev = tvm.cuda(0)
A = tvm.te.placeholder((n, l), name="A", dtype=dtype)
B = tvm.te.compute(
(n // lanes, l + 2 * padding, lanes),
lambda i, j, k: tvm.te.if_then_else(
tvm.te.any(j < padding, j >= l + padding),
tvm.runtime.convert(0).astype(dtype),
A[i * lanes + k, j - padding],
),
name="B",
)
s = te.create_schedule(B.op)
block, thread, vectorize = s[B].op.axis
s[B].bind(block, bx)
s[B].bind(thread, tx)
s[B].vectorize(vectorize)
fun = tvm.build(s, [A, B], "cuda", name="vector_load_permute_pad")
np_a = np.random.randint(low=-128, high=127, size=(n, l)).astype(A.dtype)
a = tvm.nd.empty((n, l), A.dtype, dev).copyfrom(np_a)
b = tvm.nd.empty((n // lanes, l + 2 * padding, lanes), B.dtype, dev)
fun(a, b)
np_a_reshape = np_a.reshape(n // lanes, lanes, l).transpose(0, 2, 1)
ref = np.pad(
np_a_reshape, ((0, 0), (padding, padding), (0, 0)), mode="constant", constant_values=0
)
tvm.testing.assert_allclose(b.numpy(), ref)
check_cuda("int8", 64, 16, 3, 2)
check_cuda("uint8", 64, 16, 3, 2)
check_cuda("int8", 64, 16, 3, 4)
check_cuda("uint8", 64, 16, 3, 4)
check_cuda("int32", 64, 16, 3, 4)
check_cuda("float16", 64, 16, 3, 4)
check_cuda("float32", 64, 16, 3, 4)
def vcf_check_common(s, args):
N = 512
stmt = tvm.lower(s, args)
inside_broadcast = [False]
def pre_visit(stmt):
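# Every load/store in the lowered kernel must be vectorized (ramp index or vector
# dtype) unless it sits inside a Broadcast node.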
if isinstance(stmt, tvm.tir.Broadcast):
inside_broadcast[0] = True
assert isinstance(stmt.value, (tvm.tir.IntImm, tvm.tir.FloatImm, tvm.tir.BufferLoad))
if isinstance(stmt, (tvm.tir.BufferStore, tvm.tir.BufferLoad)):
is_ramp_index = isinstance(stmt.indices[-1], tvm.tir.Ramp)
is_vectorized_buffer = re.match(r"^.*x\d+$", stmt.buffer.dtype)
if isinstance(stmt, tvm.tir.BufferLoad):
assert inside_broadcast[0] or is_ramp_index or is_vectorized_buffer
return stmt
else:
assert is_ramp_index or is_vectorized_buffer
return None
def post_visit(stmt):
if isinstance(stmt, tvm.tir.Broadcast):
inside_broadcast[0] = False
return None
tvm.tir.stmt_functor.ir_transform(stmt["main"].body, pre_visit, post_visit)
tgt = tvm.target.cuda()
mod = tvm.build(s, args, tgt)
dev = tvm.device("cuda", 0)
a = tvm.nd.array(np.random.uniform(size=(512, 512)).astype("float32"), dev)
b = tvm.nd.array(np.random.uniform(size=(512, 512)).astype("float32"), dev)
c = tvm.nd.array(np.zeros((512, 512), dtype="float32"), dev)
mod(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_cooperative_fetching_x():
N = 512
A = te.placeholder((N, N), name="A", dtype="float32")
B = te.placeholder((N, N), name="B", dtype="float32")
k = te.reduce_axis((0, N), name="k")
C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
s = te.create_schedule(C.op)
i, j = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
i3, i4 = s[C].split(i, factor=4)
i2, i3 = s[C].split(i3, factor=2)
i1, i2 = s[C].split(i2, factor=8)
i0, i1 = s[C].split(i1, factor=1)
j3, j4 = s[C].split(j, factor=4)
j2, j3 = s[C].split(j3, factor=2)
j1, j2 = s[C].split(j2, factor=8)
j0, j1 = s[C].split(j1, factor=2)
k1, k2 = s[C].split(k, factor=8)
k0, k1 = s[C].split(k1, factor=8)
s[C].reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3, k2, i4, j4)
block_it = s[C].fuse(i0, j0)
s[C].bind(block_it, tvm.te.thread_axis("blockIdx.x"))
vthread_it = s[C].fuse(i1, j1)
s[C].bind(vthread_it, tvm.te.thread_axis("vthread"))
thread_it = s[C].fuse(i2, j2)
s[C].bind(thread_it, tvm.te.thread_axis("threadIdx.x"))
s[C].vectorize(j4)
s[AA].compute_at(s[C], k0)
iaa, jaa = s[AA].op.axis
s[BB].compute_at(s[C], k0)
ibb, jbb = s[BB].op.axis
aa_fused = s[AA].fuse(iaa, jaa)
bb_fused = s[BB].fuse(ibb, jbb)
aa1, aa2 = s[AA].split(aa_fused, factor=4)
aa0, aa1 = s[AA].split(aa1, factor=64)
bb1, bb2 = s[BB].split(bb_fused, factor=4)
bb0, bb1 = s[BB].split(bb1, factor=64)
s[AA].bind(aa1, tvm.te.thread_axis("threadIdx.x"))
s[AA].vectorize(aa2)
s[BB].bind(bb1, tvm.te.thread_axis("threadIdx.x"))
s[BB].vectorize(bb2)
vcf_check_common(s, [A, B, C])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_cooperative_fetching_xy():
N = 512
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
k = te.reduce_axis((0, N), name="k")
C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
s = te.create_schedule(C.op)
i, j = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
i3, i4 = s[C].split(i, factor=4)
i2, i3 = s[C].split(i3, factor=2)
i1, i2 = s[C].split(i2, factor=8)
i0, i1 = s[C].split(i1, factor=1)
j3, j4 = s[C].split(j, factor=4)
j2, j3 = s[C].split(j3, factor=2)
j1, j2 = s[C].split(j2, factor=8)
j0, j1 = s[C].split(j1, factor=2)
k1, k2 = s[C].split(k, factor=8)
k0, k1 = s[C].split(k1, factor=8)
s[C].reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3, k2, i4, j4)
block_it = s[C].fuse(i0, j0)
s[C].bind(block_it, tvm.te.thread_axis("blockIdx.x"))
vthread_it = s[C].fuse(i1, j1)
s[C].bind(vthread_it, tvm.te.thread_axis("vthread"))
s[C].bind(i2, tvm.te.thread_axis("threadIdx.y"))
s[C].bind(j2, tvm.te.thread_axis("threadIdx.x"))
s[C].vectorize(j4)
s[AA].compute_at(s[C], k0)
iaa, jaa = s[AA].op.axis
s[BB].compute_at(s[C], k0)
ibb, jbb = s[BB].op.axis
aa_fused = s[AA].fuse(iaa, jaa)
bb_fused = s[BB].fuse(ibb, jbb)
aa2, aa3 = s[AA].split(aa_fused, factor=4)
aa1, aa2 = s[AA].split(aa2, factor=8)
aa0, aa1 = s[AA].split(aa1, factor=8)
bb2, bb3 = s[BB].split(bb_fused, factor=4)
bb1, bb2 = s[BB].split(bb2, factor=8)
bb0, bb1 = s[BB].split(bb1, factor=8)
s[AA].bind(aa1, tvm.te.thread_axis("threadIdx.y"))
s[AA].bind(aa2, tvm.te.thread_axis("threadIdx.x"))
s[AA].vectorize(aa3)
s[BB].bind(bb1, tvm.te.thread_axis("threadIdx.y"))
s[BB].bind(bb2, tvm.te.thread_axis("threadIdx.x"))
s[BB].vectorize(bb3)
vcf_check_common(s, [A, B, C])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_unrolled_vectorization():
dtype = "float32"
target = "cuda"
N = 128
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
k = te.reduce_axis((0, N), name="k")
C = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
s = te.create_schedule([C.op])
CC = s.cache_write(C, "local")
i, j = s[C].op.axis
bx, tx, ii, ji = s[C].tile(i, j, 1, 2)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].vectorize(ji)
s[CC].compute_at(s[C], tx)
i, j = s[CC].op.axis
k = s[CC].op.reduce_axis[0]
ko, ki = s[CC].split(k, 2)
s[CC].unroll(ki)
s[CC].vectorize(j)
dev = tvm.device(target)
a_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), device=dev)
b_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), device=dev)
c_tvm = tvm.nd.empty((N, N), device=dev)
func_tvm = tvm.build(s, [A, B, C], target=target)
func_tvm(a_tvm, b_tvm, c_tvm)
c_np = c_tvm.numpy()
tvm.testing.assert_allclose(c_np, N * np.ones((N, N)))
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_try_unaligned_vector_load():
def get_compute(N, C_N, offset):
A = te.placeholder((N,), name="A", dtype="float16")
C = te.compute((C_N,), lambda i: A[i + offset], name="C")
return N, C_N, A, C
def get_compute_unaligned():
return get_compute(3, 2, 1)
def get_compute_aligned():
return get_compute(4, 2, 2)
def build(A, C, N, C_N):
s = te.create_schedule(C.op)
oi, ii = s[C].split(C.op.axis[0], factor=2)
s[C].bind(oi, te.thread_axis("threadIdx.x"))
s[C].vectorize(ii)
tgt = tvm.target.Target(target="cuda", host="llvm")
dev = tvm.device(tgt.kind.name, 0)
f = tvm.build(s, [A, C], tgt, name="foo")
kernel_source = f.imported_modules[0].get_source()
a_data = np.arange(0, N).astype(A.dtype)
a = tvm.nd.array(a_data, dev)
c = tvm.nd.array(np.zeros(C_N, dtype=C.dtype), dev)
f(a, c)
return a_data, c.numpy(), kernel_source
N, C_N, A, C = get_compute_unaligned()
a_data, c, kernel_source = build(A, C, N, C_N)
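# Unaligned case: the generated CUDA source must not address the input as a
# vectorized pointer at offset 1.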
assert "A + (1)" not in kernel_source
expected = a_data[1 : C_N + 1]
assert np.allclose(c, expected), f"expected={expected}\nactual={c}"
N, C_N, A, C = get_compute_aligned()
a_data, c, kernel_source = build(A, C, N, C_N)
assert "A + 2" in kernel_source
expected = a_data[2 : C_N + 2]
assert np.allclose(c, expected), f"expected={expected}\nactual={c}"
if __name__ == "__main__":
pytest.main([__file__])
import tvm
from tvm import te
from tvm.contrib import utils
import numpy as np
import tvm.testing
@tvm.testing.requires_gpu
def test_large_uint_imm():
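# (1 << 63) + 123 does not fit in a signed 64-bit integer, so this exercises
# genuine uint64 immediates in codegen.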
value = (1 << 63) + 123
other = tvm.tir.const(3, "uint64")
n = 12
num_thread = 2
A = te.compute((n,), lambda *i: tvm.tir.const(value, "uint64") + other, name="A")
s = te.create_schedule(A.op)
xo, xi = s[A].split(A.op.axis[0], factor=num_thread)
s[A].bind(xi, te.thread_axis("threadIdx.x"))
s[A].bind(xo, te.thread_axis("blockIdx.x"))
def check_target(device):
if not tvm.testing.device_enabled(device):
return
dev = tvm.device(device, 0)
f = tvm.build(s, [A], device)
a = tvm.nd.empty((n,), dtype=A.dtype, device=dev)
f(a)
assert a.numpy()[0] == value + 3
check_target("cuda")
check_target("vulkan -from_device=0")
@tvm.testing.requires_gpu
def test_add_pipeline():
n = te.size_var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(), name="C")
D = te.compute(A.shape, lambda *i: C(*i) + 1, name="D")
s = te.create_schedule(D.op)
num_thread = 256
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xi, te.thread_axis("threadIdx.x"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
s[D].bind(xi, te.thread_axis("threadIdx.x"))
s[D].bind(xo, te.thread_axis("blockIdx.x"))
def check_target(device, host="stackvm"):
if not tvm.testing.device_enabled(device) or not tvm.testing.device_enabled(host):
return
dev = tvm.device(device, 0)
mhost = tvm.driver.build(s, [A, B, D], target=tvm.target.Target(device, host))
f = mhost.entry_func
n = 1027
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=()).astype(B.dtype), dev)
d = tvm.nd.array(np.zeros(n, dtype=D.dtype), dev)
f(a, b, d)
tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + 1)
check_target("cuda", host="llvm")
check_target("nvptx", host="llvm")
check_target("vulkan", host="llvm")
check_target("rocm", host="llvm")
if __name__ == "__main__":
test_large_uint_imm()
test_add_pipeline()
import tvm
from tvm import te
import numpy as np
import tvm.testing
@tvm.testing.uses_gpu
def test_add_pipeline():
nn = 64
max_threads = 4
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
def extern_generator(ins, outs):
"""Manually write the IR for the extern function, add pipeline"""
ib = tvm.tir.ir_builder.create()
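# Each iteration stores two float32 lanes (float32x2), so only (n + 1) // 2
# iterations are needed.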
with ib.for_range(0, (n + 1) // 2) as i:
ib.emit(
outs[0].vstore(
i * 2, ins[0].vload(i * 2, "float32x2") + tvm.tir.const(1, "float32x2")
)
)
return ib.get()
def extern_generator_gpu(ins, outs):
"""Manually write the IR for the extern function, add pipeline"""
ib = tvm.tir.ir_builder.create()
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", (nn + max_threads - 1) // max_threads)
ib.scope_attr(tx, "thread_extent", max_threads)
idx = bx.var * max_threads + tx.var
with ib.if_scope(ib.likely(idx < n)):
ib.emit(
outs[0].vstore(
idx * 2, ins[0].vload(idx * 2, "float32x2") + tvm.tir.const(1, "float32x2")
)
)
return ib.get()
C_cpu = te.extern(A.shape, [A], extern_generator, name="C")
C_gpu = te.extern(A.shape, [A], extern_generator_gpu, name="C")
s_cpu = te.create_schedule(C_cpu.op)
s_gpu = te.create_schedule(C_gpu.op)
print(tvm.lower(s_cpu, [A, C_cpu], simple_mode=True))
print(tvm.lower(s_gpu, [A, C_gpu], simple_mode=True))
def check_target(target):
if not tvm.testing.device_enabled(target):
return
s = s_gpu if target in ["opencl", "cuda"] else s_cpu
C = C_gpu if target in ["opencl", "cuda"] else C_cpu
f = tvm.build(s, [A, C], target)
dev = tvm.device(target, 0)
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_target("llvm")
check_target("opencl")
check_target("cuda")
def test_pack_buffer_simple():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
def extern_generator(ins, outs):
"""Manually write the IR for the extern function, add pipeline."""
return tvm.tir.call_packed("my_extern_array_func1", ins[0], outs[0])
C = te.extern(A.shape, [A], extern_generator, name="C")
s = te.create_schedule(C.op)
@tvm.register_func
def my_extern_array_func1(aa, bb):
aa.copyto(bb)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
f = tvm.build(s, [A, C], target)
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy())
check_target("stackvm")
check_target("llvm")
def test_pack_buffer_intermediate():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: A[i] + 1, name="B")
def extern_generator(ins, outs):
"""Manually write the IR for the extern function, add pipeline."""
return tvm.tir.call_packed("my_extern_array_func2", ins[0], outs[0])
C = te.extern(B.shape, [B], extern_generator, name="C")
s = te.create_schedule(C.op)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
f = tvm.build(s, [A, C], target)
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
@tvm.register_func
def my_extern_array_func2(aa, bb):
assert aa.shape == a.shape
tvm.testing.assert_allclose(aa.numpy(), a.numpy() + 1)
aa.copyto(bb)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_target("llvm")
if __name__ == "__main__":
test_pack_buffer_simple()
test_pack_buffer_intermediate()
test_add_pipeline()
import numpy as np
import os
import pytest
import re
import sys
import tvm
import tvm.relay
import tvm.testing
import tvm.contrib.hexagon as hexagon
@pytest.fixture(autouse=True)
def register_linker():
original_linker = hexagon.hexagon_link()
hexagon.register_linker(lambda: "/bin/true")
yield None
hexagon.register_linker(original_linker)
@tvm.testing.requires_hexagon
def test_basic():
target = tvm.target.hexagon("v66", hvx=128)
def check_add(offload):
A = tvm.te.placeholder((128,), dtype="uint8", name="A")
B = tvm.te.placeholder((128,), dtype="uint8", name="B")
C = tvm.te.compute((128,), lambda i: A[i] + B[i], name="C")
s = tvm.te.create_schedule(C.op)
if offload:
xo, xi = s[C].split(s[C].op.axis[0], nparts=1)
s[C].bind(xo, tvm.te.thread_axis("pipeline"))
m = tvm.build(s, [C, A, B], target=target, name="offload_add")
hexm = m.imported_modules[0]
else:
hexm = tvm.build(
s, [C, A, B], target=tvm.target.Target(target, target), name="native_add"
)
asm = hexm.get_source("s")
vadds = re.findall(r"v[0-9]+.b = vadd\(v[0-9]+.b,v[0-9]+.b\)", asm)
assert vadds
check_add(True)
check_add(False)
@tvm.testing.requires_hexagon
def test_llvm_target_features():
target = tvm.target.hexagon("v66", hvx=128)
A = tvm.te.placeholder((128,), dtype="uint8", name="A")
C = tvm.te.compute((128,), lambda i: A[i] + 1, name="C")
s = tvm.te.create_schedule(C.op)
m = tvm.build(s, [C, A], target=tvm.target.Target(target, target), name="add_one")
llvm_ir = m.get_source("ll")
fs = re.findall(r"attributes.*\+hvx-length128b", llvm_ir)
assert fs
@tvm.testing.requires_hexagon
def test_alloc_vtcm():
target = tvm.target.hexagon("v66")
buf_len = 2048
A = tvm.te.placeholder((buf_len,), name="A", dtype="int8")
B = tvm.te.placeholder((buf_len,), name="B", dtype="int8")
A_buf = tvm.te.compute((buf_len,), lambda *i: A(*i), "A_buf")
B_buf = tvm.te.compute((buf_len,), lambda *i: B(*i), "B_buf")
C = tvm.te.compute((buf_len,), lambda *i: A_buf(*i) + B_buf(*i), name="C")
s = tvm.te.create_schedule(C.op)
s[A_buf].set_scope("local.vtcm")
s[B_buf].set_scope("local.vtcm")
config = {"tir.add_lower_pass": hexagon.ir_lower_vtcm_pass()}
with tvm.transform.PassContext(config=config):
irmod = tvm.lower(s, [A, B, C], name="alloc_vtcm")
calls = re.findall("HexagonBackend[A-Za-z]*VTCM", str(irmod["alloc_vtcm"]))
assert "HexagonBackendAllocateVTCM" in calls
assert "HexagonBackendFreeVTCM" in calls
@tvm.testing.requires_hexagon
def test_llvm_options():
target = tvm.target.hexagon("v66", llvm_options="-hexagon-noopt")
Zero = tvm.te.compute((10,), lambda _: tvm.tir.const(0, "int32"))
s = tvm.te.create_schedule(Zero.op)
tvm.build(s, [Zero], target=target, name="zero")
assert re.search("-hexagon-noopt", str(target))
if __name__ == "__main__":
tvm.testing.main()
import collections
import ctypes
import json
import math
import numpy as np
import pytest
import re
import sys
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import clang, utils
from tvm.relay.backend import Runtime
from tvm.script import tir as T
from tvm.target.codegen import llvm_get_intrinsic_name, llvm_lookup_intrinsic_id
@tvm.testing.requires_llvm
def test_llvm_intrin():
ib = tvm.tir.ir_builder.create()
n = tvm.runtime.convert(4)
A = ib.pointer("float32", name="A")
args = [tvm.tir.call_intrin("handle", "tir.address_of", A[0]), 0, 3, 1]
ib.emit(tvm.tir.Evaluate(tvm.tir.Call("int32", "tir.prefetch", args)))
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "prefetch"))
fcode = tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_void_intrin():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("uint8", name="A")
x = tvm.tir.call_llvm_intrin("", "llvm.va_start", tvm.tir.const(1, "uint32"), A.asobject().data)
ib.emit(x)
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "main"))
fcode = tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_intrinsic_id():
orig_name = "llvm.x86.sse2.pmadd.wd"
intrin_id = llvm_lookup_intrinsic_id(orig_name)
name = llvm_get_intrinsic_name(intrin_id)
assert orig_name == name
@tvm.testing.requires_llvm
def test_llvm_overloaded_intrin():
if tvm.target.codegen.llvm_version_major() < 5:
return
def use_llvm_intrinsic(A, C):
ib = tvm.tir.ir_builder.create()
L = A.vload((0, 0))
I = tvm.tir.call_llvm_pure_intrin(
"int32", "llvm.ctlz", tvm.tir.const(2, "uint32"), L, tvm.tir.const(0, "int1")
)
S = C.vstore((0, 0), I)
ib.emit(S)
return ib.get()
A = tvm.te.placeholder((1, 1), dtype="int32", name="A")
C = tvm.te.extern(
(1, 1), [A], lambda ins, outs: use_llvm_intrinsic(ins[0], outs[0]), name="C", dtype="int32"
)
s = tvm.te.create_schedule(C.op)
f = tvm.build(s, [A, C], target="llvm")
@tvm.testing.requires_llvm
def test_llvm_lookup_intrin():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("uint8x8", name="A")
z = tvm.tir.const(0, "int32")
x = tvm.tir.call_llvm_pure_intrin(
"uint8x8", "llvm.ctpop.v8i8", tvm.tir.const(1, "uint32"), A[z]
)
ib.emit(x)
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "main"))
fcode = tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_large_uintimm():
value = (1 << 63) + 123
other = tvm.tir.const(3, "uint64")
A = te.compute((), lambda: tvm.tir.const(value, "uint64") + other, name="A")
s = te.create_schedule(A.op)
def check_llvm():
f = tvm.build(s, [A], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.empty((), dtype=A.dtype, device=dev)
f(a)
assert a.numpy() == value + 3
check_llvm()
@tvm.testing.requires_llvm
def test_llvm_persist_parallel():
n = 128
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1, name="B")
C = te.compute(A.shape, lambda *i: te.sqrt(B(*i)) * 2 + 2, name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=8)
xo1, xo2 = s[C].split(xo, nparts=1)
s[B].compute_at(s[C], xo1)
s[B].parallel(s[B].op.axis[0])
s[B].pragma(s[B].op.axis[0], "parallel_barrier_when_finish")
s[C].parallel(xi)
s[C].pragma(xo1, "parallel_launch_point")
s[C].pragma(xi, "parallel_stride_pattern")
def check_llvm():
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), np.sqrt(a.numpy() + 1) * 2 + 2, rtol=1e-5)
check_llvm()
@tvm.testing.requires_llvm
def test_llvm_flip_pipeline():
def check_llvm(nn, base):
n = tvm.runtime.convert(nn)
A = te.placeholder((n + base,), name="A")
C = te.compute((n,), lambda i: A(nn + base - i - 1), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=(n + base)).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy()[::-1][:n])
check_llvm(4, 0)
check_llvm(128, 8)
check_llvm(3, 0)
check_llvm(128, 1)
@tvm.testing.requires_llvm
def test_llvm_vadd_pipeline():
def check_llvm(n, lanes):
A = te.placeholder((n,), name="A", dtype="float32x%d" % lanes)
B = te.compute((n,), lambda i: A[i], name="B")
C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], nparts=2)
_, xi = s[C].split(xi, factor=2)
s[C].parallel(xo)
s[C].vectorize(xi)
s[B].compute_at(s[C], xo)
xo, xi = s[B].split(B.op.axis[0], factor=2)
s[B].vectorize(xi)
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.empty((n,), A.dtype).copyfrom(np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), C.dtype, dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_llvm(64, 2)
check_llvm(512, 2)
@tvm.testing.requires_llvm
def test_llvm_madd_pipeline():
def check_llvm(nn, base, stride):
n = tvm.runtime.convert(nn)
A = te.placeholder((n + base, stride), name="A")
C = te.compute((n, stride), lambda i, j: A(base + i, j) + 1, name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=(n + base, stride)).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros((n, stride), dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy()[base:] + 1)
check_llvm(64, 0, 2)
check_llvm(4, 0, 1)
with tvm.transform.PassContext(config={"tir.noalias": False}):
check_llvm(4, 0, 3)
@tvm.testing.requires_llvm
def test_llvm_temp_space():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A(i) + 1, name="B")
C = te.compute(A.shape, lambda i: B(i) + 1, name="C")
s = te.create_schedule(C.op)
def check_llvm():
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1 + 1)
check_llvm()
@tvm.testing.requires_llvm
def test_multiple_func():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
def check_llvm():
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
m = tvm.build([f1, f2], "llvm")
fadd2 = m["fadd2"]
fadd1 = m["fadd1"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd1(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
fadd2(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_llvm()
@tvm.testing.requires_llvm
def test_llvm_condition():
def check_llvm(n, offset):
A = te.placeholder((n,), name="A")
C = te.compute((n,), lambda i: tvm.tir.if_then_else(i >= offset, A[i], 0.0), name="C")
s = te.create_schedule(C.op)
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev)
c = tvm.nd.empty((n,), A.dtype, dev)
f(a, c)
c_np = a.numpy()
c_np[:offset] = 0
tvm.testing.assert_allclose(c.numpy(), c_np)
check_llvm(64, 8)
@tvm.testing.requires_llvm
def test_llvm_bool():
def check_llvm(n):
A = te.placeholder((n,), name="A", dtype="int32")
C = te.compute((n,), lambda i: A[i].equal(1).astype("float"), name="C")
s = te.create_schedule(C.op)
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
c = tvm.nd.empty((n,), C.dtype, dev)
f(a, c)
c_np = a.numpy() == 1
tvm.testing.assert_allclose(c.numpy(), c_np)
check_llvm(64)
@tvm.testing.requires_llvm
def test_rank_zero():
def check_llvm(n):
A = te.placeholder((n,), name="A")
scale = te.placeholder((), name="scale")
k = te.reduce_axis((0, n), name="k")
C = te.compute((), lambda: te.sum(A[k] * scale(), axis=k), name="C")
D = te.compute((), lambda: C() + 1)
s = te.create_schedule(D.op)
f = tvm.build(s, [A, scale, D], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
d = tvm.nd.empty((), D.dtype, dev)
f(a, sc, d)
d_np = np.sum(a.numpy()) * sc.numpy() + 1
tvm.testing.assert_allclose(d.numpy(), d_np)
check_llvm(64)
@tvm.testing.requires_llvm
def test_rank_zero_bound_checkers():
def check_llvm(n):
with tvm.transform.PassContext(config={"tir.instrument_bound_checkers": True}):
A = te.placeholder((n,), name="A")
scale = te.placeholder((), name="scale")
k = te.reduce_axis((0, n), name="k")
C = te.compute((), lambda: te.sum(A[k] * scale(), axis=k), name="C")
D = te.compute((), lambda: C() + 1)
s = te.create_schedule(D.op)
f = tvm.build(s, [A, scale, D], "llvm")
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
d = tvm.nd.empty((), D.dtype, dev)
f(a, sc, d)
d_np = np.sum(a.numpy()) * sc.numpy() + 1
tvm.testing.assert_allclose(d.numpy(), d_np)
check_llvm(64)
@tvm.testing.requires_llvm
def test_alignment():
n = tvm.runtime.convert(1024)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] * 3, name="B")
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=8)
s[B].vectorize(tx)
f = tvm.build(s, [A, B], "llvm", name="test_alignment")
lines = f.get_source().split("\n")
for l in lines:
if "align" in l and "4 x float" in l:
assert "align 32" in l
def has_param_alignment():
for l in lines:
if re.search(r"test_alignment_compute_\([^(]*align [0-9]", l):
return True
return False
if tvm.target.codegen.llvm_version_major() >= 5:
assert has_param_alignment()
def has_call_to_assume():
for l in lines:
if re.search(r"call.*llvm.assume", l):
return True
return False
assert has_call_to_assume()
@tvm.testing.requires_llvm
def test_llvm_div():
"""Check that the semantics of div and mod is correct"""
def check(start, end, dstart, dend, dtype, floor_div=False):
div = tvm.te.floordiv if floor_div else tvm.tir.truncdiv
mod = tvm.te.floormod if floor_div else tvm.tir.truncmod
A = te.placeholder((end - start + 1,), name="A", dtype=dtype)
B = te.placeholder((dend - dstart + 1,), name="B", dtype=dtype)
def clipa(x):
return tvm.te.min(tvm.tir.const(end, dtype), tvm.te.max(tvm.tir.const(start, dtype), x))
def clipb(x):
return tvm.te.min(
tvm.tir.const(dend, dtype), tvm.te.max(tvm.tir.const(dstart, dtype), x)
)
if start == end:
def clipa(x):
return tvm.tir.const(start, dtype)
if dstart == dend:
def clipb(x):
return tvm.tir.const(dstart, dtype)
[D, M] = te.compute(
(end - start + 1, dend - dstart + 1),
lambda i, j: (div(clipa(A[i]), clipb(B[j])), mod(clipa(A[i]), clipb(B[j]))),
)
s = te.create_schedule([D.op, M.op])
f = tvm.build(s, [A, B, D, M], "llvm")
A_arr = tvm.nd.empty((end - start + 1,), dtype)
B_arr = tvm.nd.empty((dend - dstart + 1,), dtype)
A_arr.copyfrom(np.arange(start, end + 1, dtype=dtype))
B_np = np.arange(dstart, dend + 1, dtype=dtype)
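# If the divisor range straddles zero, replace the zero entry so the device
# kernel never divides by zero (the reference loop skips j == 0 explicitly).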
if dend >= 0 and dstart <= 0:
B_np[-dstart] = 1
B_arr.copyfrom(B_np)
D_arr = tvm.nd.empty((end - start + 1, dend - dstart + 1), dtype)
M_arr = tvm.nd.empty((end - start + 1, dend - dstart + 1), dtype)
f(A_arr, B_arr, D_arr, M_arr)
D_arr = D_arr.numpy()
M_arr = M_arr.numpy()
def _show_info():
print("dtype: {}".format(dtype))
print("dividend range: [{}, {}]".format(start, end))
print("divisor range: [{}, {}]".format(dstart, dend))
lowered = tvm.lower(s, [A, B, D, M], simple_mode=True)
print("Lowered code:")
print(lowered)
for i in range(start, end + 1):
for j in range(dstart, dend + 1):
if j == 0:
continue
if floor_div:
dref = i // j
mref = i % j
else:
dref = int(float(i) / j)
mref = int(math.fmod(i, j))
if D_arr[i - start, j - dstart] != dref:
_show_info()
raise AssertionError(
"Incorrect division result: {}({}, {}) is {} "
"but should be {}".format(
div.__name__, i, j, D_arr[i - start, j - dstart], dref
)
)
if M_arr[i - start, j - dstart] != mref:
_show_info()
raise AssertionError(
"Incorrect modulo result: {}({}, {}) is {} "
"but should be {}".format(
mod.__name__, i, j, M_arr[i - start, j - dstart], mref
)
)
for start, end in [
(-12, -12),
(-11, -1),
(-11, 0),
(0, 0),
(12, 12),
(1, 11),
(0, 11),
(-11, 11),
]:
for dstart, dend in [
(-11, -1),
(-11, 0),
(-4, -4),
(-2, -2),
(1, 11),
(0, 11),
(4, 4),
(2, 2),
(-11, 11),
]:
if end < start or dend < dstart or (dend == 0 and dstart == 0):
continue
check(start, end, dstart, dend, "int32", floor_div=False)
check(start, end, dstart, dend, "int32", floor_div=True)
check(start, end, dstart, dend, "int8", floor_div=False)
check(start, end, dstart, dend, "int8", floor_div=True)
if start >= 0 and dstart >= 0:
check(start, end, dstart, dend, "uint32", floor_div=False)
check(start, end, dstart, dend, "uint32", floor_div=True)
for dstart, dend in [(0, 11), (1, 11), (2, 2), (4, 4)]:
check(123, 133, dstart, dend, "uint8", floor_div=False)
check(123, 133, dstart, dend, "uint8", floor_div=True)
check(0, 255, dstart, dend, "uint8", floor_div=False)
check(0, 255, dstart, dend, "uint8", floor_div=True)
@tvm.testing.requires_llvm
def test_llvm_fp_math():
def check_llvm_reciprocal(n):
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: te.div(1.0, (1e37 * A[i])), name="B")
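# With A filled with 100, the denominator 1e37 * 100 exceeds the float32 range,
# so the reciprocal is expected to evaluate to zero.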
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
a = tvm.nd.array(np.full((n,), 100, "float32"))
b = tvm.nd.empty((n,), "float32")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np.zeros((n,), "float32"))
check_llvm_reciprocal(4)
check_llvm_reciprocal(8)
check_llvm_reciprocal(16)
def check_llvm_sigmoid(n):
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: te.sigmoid(A[i]), name="B")
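# sigmoid(-1000) should evaluate to 0 without overflowing inside the exponential.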
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
a = tvm.nd.array(np.full((n,), -1000, "float32"))
b = tvm.nd.empty((n,), "float32")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np.zeros((n,), "float32"))
check_llvm_sigmoid(4)
check_llvm_sigmoid(8)
check_llvm_sigmoid(16)
@tvm.testing.requires_llvm
def test_dwarf_debug_information():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
def check_llvm_object():
if tvm.target.codegen.llvm_version_major() < 5:
return
if tvm.target.codegen.llvm_version_major() > 6:
return
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
m = tvm.build([f1, f2], "llvm")
temp = utils.tempdir()
o_path = temp.relpath("temp.o")
m.save(o_path) |
import shutil |
import subprocess |
import sys
if shutil.which("dwarfdump"):
output = subprocess.check_output(["dwarfdump", o_path])
assert re.search(r"""DW_AT_name\\t\("fadd1"\)""", str(output))
assert re.search(r"""DW_AT_name\\t\("fadd2"\)""", str(output))
if shutil.which("gobjdump"):
output = subprocess.check_output(["gobjdump", "--dwarf", o_path])
assert re.search(r"""DW_AT_name.*fadd1""", str(output))
assert re.search(r"""DW_AT_name.*fadd2""", str(output))
if shutil.which("objdump") and sys.platform != "darwin":
output = subprocess.check_output(["objdump", "--dwarf", o_path])
assert re.search(r"""DW_AT_name.*fadd1""", str(output))
assert re.search(r"""DW_AT_name.*fadd2""", str(output))
def check_llvm_ir():
if tvm.target.codegen.llvm_version_major() < 5:
return
if tvm.target.codegen.llvm_version_major() > 6:
return
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
m = tvm.build([f1, f2], target="llvm -mtriple=aarch64-linux-gnu")
ll = m.get_source("ll") |
import re
assert not re.search(r""""Dwarf Version""" "", ll)
assert re.search(r"""llvm.dbg.value""", ll)
m = tvm.build([f1, f2], target="llvm -mtriple=x86_64-apple-darwin-macho")
ll = m.get_source("ll")
assert re.search(r"""i32 4, !"Dwarf Version", i32 2""", ll)
assert re.search(r"""llvm.dbg.value""", ll)
check_llvm_object()
check_llvm_ir()
@tvm.testing.requires_llvm
def test_llvm_shuffle():
a = te.placeholder((8,), "int32")
b = te.placeholder((8,), "int32")
c = te.compute((8,), lambda x: a[x] + b[7 - x])
sch = te.create_schedule(c.op)
def my_vectorize():
def vectorizer(op):
store = op.body
idx = tvm.tir.Ramp(tvm.tir.const(0, "int32"), tvm.tir.const(1, "int32"), 8)
value = store.value
b_idx = tvm.tir.Shuffle([idx], [tvm.tir.const(i, "int32") for i in range(7, -1, -1)])
new_a = tvm.tir.BufferLoad(value.a.buffer, [idx])
new_b = tvm.tir.BufferLoad(value.b.buffer, [b_idx])
value = new_a + new_b
return tvm.tir.BufferStore(store.buffer, new_a + new_b, [idx])
def _transform(f, *_):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ["tir.For"])
)
return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="my_vectorize")
with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, my_vectorize())]}):
ir = tvm.lower(sch, [a, b, c], simple_mode=True)
module = tvm.build(sch, [a, b, c])
a_ = tvm.nd.array(np.arange(1, 9, dtype="int32"))
b_ = tvm.nd.array(np.arange(8, 0, -1, dtype="int32"))
c_ = tvm.nd.array(np.zeros((8,), dtype="int32"))
module(a_, b_, c_)
tvm.testing.assert_allclose(c_.numpy(), (a_.numpy() * 2).astype("int32"))
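# Illustrative sketch (assumption: added for clarity, not in the original test):
# a numpy reference for the Shuffle-based vectorizer above. The Shuffle node
# reverses the 8-lane load of b, so the kernel computes c[x] = a[x] + b[7 - x],
# which for a = 1..8 and b = 8..1 equals 2 * a.
def _demo_shuffle_reference():
    a = np.arange(1, 9, dtype="int32")
    b = np.arange(8, 0, -1, dtype="int32")
    c = a + b[::-1]
    np.testing.assert_array_equal(c, a * 2)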
def np_float2np_bf16(arr):
"""Convert a numpy array of float to a numpy array
of bf16 in uint16"""
orig = arr.view("<u4")
bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
return np.right_shift(orig + bias, 16).astype("uint16")
def np_float2tvm_bf16(arr):
"""Convert a numpy array of float to a TVM array
of bf16"""
nparr = np_float2np_bf16(arr)
return tvm.nd.empty(nparr.shape, "uint16").copyfrom(nparr)
def np_bf162np_float(arr):
"""Convert a numpy array of bf16 (uint16) to a numpy array
of float"""
u32 = np.left_shift(arr.astype("uint32"), 16)
return u32.view("<f4")
def np_bf16_cast_and_cast_back(arr):
"""Convert a numpy array of float to bf16 and cast back"""
return np_bf162np_float(np_float2np_bf16(arr))
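# Illustrative sketch (assumption: added for clarity, not in the original test):
# a standalone check of the bf16 helpers above. bf16 keeps the top 16 bits of the
# float32 pattern with round-to-nearest-even, so the round trip stays within one
# bf16 ulp (8-bit mantissa, relative error bounded by 2**-8).
def _demo_bf16_roundtrip():
    x = np.array([1.0, 1.1, -3.25], dtype="float32")
    y = np_bf16_cast_and_cast_back(x)
    np.testing.assert_allclose(y, x, rtol=2.0**-8)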
@tvm.testing.requires_llvm
def test_llvm_bf16():
def dotest(do_vectorize):
np.random.seed(122)
A = te.placeholder((32,), dtype="bfloat16")
B = te.placeholder((32,), dtype="bfloat16")
d = te.compute((32,), lambda x: A[x] + B[x])
sch = te.create_schedule(d.op)
print(tvm.lower(sch, [A, B, d]))
if do_vectorize:
sch[d].vectorize(d.op.axis[0])
module = tvm.build(sch, [A, B, d])
npa = np.random.rand(32).astype("float32")
npb = np.random.rand(32).astype("float32")
va = np_bf16_cast_and_cast_back(npa)
vb = np_bf16_cast_and_cast_back(npb)
res = np_bf16_cast_and_cast_back(va + vb)
a_ = np_float2tvm_bf16(npa)
b_ = np_float2tvm_bf16(npb)
c_ = tvm.nd.empty((32,), "uint16")
module(a_, b_, c_)
tvm.testing.assert_allclose(np_bf162np_float(c_.numpy()), res)
dotest(True)
dotest(False)
@tvm.testing.requires_llvm
def test_llvm_crt_static_lib():
A = te.placeholder((32,), dtype="bfloat16")
B = te.placeholder((32,), dtype="bfloat16")
d = te.compute((32,), lambda x: A[x] + B[x])
sch = te.create_schedule(d.op)
module = tvm.build(
sch,
[A, B, d],
target=tvm.target.Target("llvm"),
runtime=Runtime("crt", {"system-lib": True}),
)
print(module.get_source())
module.save("test.o" |
)
def atomic_add(x, y):
return tvm.tir.call_intrin(y.dtype, "tir.atomic_add", x, y)
@tvm.testing.requires_llvm
def test_llvm_lower_atomic():
def do_atomic_add(A):
ib = tvm.tir.ir_builder.create()
n = A.shape[0]
atomic_add_return = ib.allocate(A.dtype, (1,), name="atomic_add_return", scope="local")
one = tvm.tir.const(1, A.dtype)
A_ptr = ib.buffer_ptr(A)
with ib.for_range(0, n, name="i", kind="parallel") as i:
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", A_ptr[0]), one
)
return ib.get()
A = tvm.te.placeholder((100,), dtype="int32", name="A")
C = tvm.te.extern((100,), [A], lambda ins, _: do_atomic_add(ins[0]), name="C", dtype="int32")
s = tvm.te.create_schedule(C.op)
@tvm.testing.requires_llvm
@tvm.testing.requires_gpu
def test_llvm_gpu_lower_atomic():
def do_atomic_add(A):
ib = tvm.tir.ir_builder.create()
n = A.shape[0]
atomic_add_return = ib.allocate(A.dtype, (1,), name="atomic_add_return", scope="local")
one = tvm.tir.const(1, A.dtype)
A_ptr = ib.buffer_ptr(A)
nthread_tx = 64
with ib.new_scope():
nthread_bx = (n + nthread_tx - 1) // nthread_tx
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", A_ptr[0]), one
)
return ib.get()
size = 1024
for dtype in ["int32"]:
A = tvm.te.placeholder((size,), dtype=dtype, name="A")
C = tvm.te.extern((size,), [A], lambda ins, _: do_atomic_add(ins[0]), dtype=dtype)
s = tvm.te.create_schedule(C.op)
f = tvm.build(s, [A], target="nvptx")
dev = tvm.cuda()
a = tvm.nd.array(np.zeros((size,)).astype(A.dtype), dev)
f(a)
ref = np.zeros((size,)).astype(A.dtype)
ref[0] = size
tvm.testing.assert_allclose(a.numpy(), ref, rtol=1e-5)
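# Illustrative sketch (assumption: added for clarity, not in the original test):
# what the GPU kernel above computes. Each of the `size` threads performs an
# atomic fetch-and-add of 1 on A[0], so the expected result is A[0] == size with
# all other elements untouched.
def _demo_atomic_add_reference(size=1024):
    a = np.zeros((size,), dtype="int32")
    for _ in range(size):  # serial stand-in for the `size` concurrent atomic adds
        a[0] += 1
    assert a[0] == size and not a[1:].any()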
@tvm.testing.requires_llvm
def test_llvm_order_functions():
"""Check that functions in the LLVM module are ordered alphabetically."""
def make_call_extern(caller, callee):
ib = tvm.tir.ir_builder.create()
v = tvm.te.var("v", dtype="float32")
t = tvm.tir.call_extern("float32", callee, v)
ib.emit(t)
return tvm.tir.PrimFunc([v], ib.get()).with_attr("global_symbol", caller)
functions = {
"Danny": make_call_extern("Danny", "Dave"),
"Sammy": make_call_extern("Sammy", "Eve"),
"Kirby": make_call_extern("Kirby", "Fred"),
}
mod = tvm.IRModule(functions=functions)
ir_text = tvm.build(mod, None, target="llvm").get_source("ll")
matches = re.findall(r"^define[^@]*@([a-zA-Z][a-zA-Z0-9_]*)", ir_text, re.MULTILINE)
assert matches == sorted(matches)
@tvm.testing.requires_llvm
@tvm.testing.skip_if_32bit
def test_llvm_import():
"""all-platform-minimal-test: check shell dependent clang behavior."""
cc_code = """
extern "C" float my_add(float x, float y) {
return x + y;
}
"""
n = 10
A = te.placeholder((n,), name="A")
B = te.compute(
(n,), lambda *i: tvm.tir.call_pure_extern("float32", "my_add", A(*i), 1.0), name="B"
)
def check_llvm(use_file):
if not clang.find_clang(required=False):
print("skip because clang is not available")
return
temp = utils.tempdir()
ll_path = temp.relpath("temp.ll")
ll_code = clang.create_llvm(cc_code, output=ll_path)
s = te.create_schedule(B.op)
if use_file:
s[B].pragma(s[B].op.axis[0], "import_llvm", ll_path)
else:
s[B].pragma(s[B].op.axis[0], "import_llvm", ll_code)
f = tvm.build(s, [A, B], "llvm")
dev = tvm.cpu(0) |
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1.0)
check_llvm(use_file=True)
check_llvm(use_file=False)
@tvm.testing.requires_llvm
def test_llvm_scalar_concat():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
z = tvm.tir.decl_buffer((1,), "int32x2")
s = tvm.tir.Shuffle([x, y], [0, 1])
f = tvm.tir.PrimFunc([x, y, z], z.vstore(0, s))
mod = tvm.ir.IRModule.from_expr(f.with_attr("global_symbol", "codegen_scalar_concat"))
with tvm.transform.PassContext(config={"tir.disable_assert": True}):
m = tvm.build(mod, [x, y, z], target="llvm")
@tvm.testing.requires_llvm
def test_raise_exception_during_codegen():
@T.prim_func
def threadpool_nested_parallel_loop(
A: T.Buffer[(4, 4), "float32"], B: T.Buffer[(4, 4), "float32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i in T.parallel(4):
for j in T.parallel(4):
B[i, j] = A[i, j] * 2.0
with pytest.raises(tvm.TVMError) as e:
tvm.build({"llvm": tvm.IRModule.from_expr(threadpool_nested_parallel_loop)})
msg = str(e)
assert msg.find("Nested parallel loop is not supported") != -1
@tvm.testing.requires_llvm
def test_llvm_target_attributes():
"""Check that when LLVM codegen creates new functions, they get the same target
attributes as the original function.
"""
n = te.var()
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute((n,), lambda i: A[i], name="B")
C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], nparts=2)
s[C].parallel(xo)
target_llvm = "llvm -mcpu=skylake -mattr=+avx512f"
target = tvm.target.Target(target_llvm, host=target_llvm)
module = tvm.build(s, [A, B, C, n], target=target, name="test_func")
llvm_ir = module.get_source()
llvm_ir_lines = llvm_ir.split("\n")
attribute_definitions = dict()
attributes_with_target = dict()
functions_with_target = []
for line in llvm_ir_lines:
func_def = re.match("define.* @(?P<func_name>[^(]*)[(].* #(?P<attr_num>[0-9]+) (!.* |){$", line)
if func_def:
functions_with_target.append(func_def.group("func_name"))
attributes_with_target[func_def.group("attr_num")] = True
continue
attr_def = re.match("attributes
if attr_def:
attribute_definitions[attr_def.group("attr_num")] = attr_def.group("attr_list")
for k in list(attributes_with_target.keys()):
assert re.match('.*"target-cpu"="skylake".*', attribute_definitions[k])
assert re.match('.*"target-features"=".*[+]avx512f.*".*', attribute_definitions[k])
expected_functions = ["test_func", "test_func_compute_", "__tvm_parallel_lambda"]
for n in expected_functions:
assert n in functions_with_target
if __name__ == "__main__":
tvm.testing.main() |
import tvm
from tvm import te
import numpy as np
from tvm import topi
import unittest
from tvm.contrib.nvcc import have_fp16, have_int8, have_bf16
from tvm.contrib import nvcc
import tvm.testing
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_gpu
@tvm.testing.requires_metal
def test_metal_inf_nan():
target = "metal"
def check_inf_nan(dev, n, value, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.device(target, 0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float16")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float16")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_metal
def test_metal_erf():
target = "metal"
def check_erf(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
C = te.compute(A.shape, lambda *i: te.erf(A(*i)), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
fun(a, c)
dev = tvm.device(target, 0)
check_erf(dev, 1, "float32")
check_erf(dev, 1, "float16")
if __name__ == "__main__":
test_metal_inf_nan()
test_metal_erf() |