import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
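# Check topi.nn.space_to_depth against the NumPy reference for both NCHW and NHWC layouts.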
def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, layout="NCHW"):
out_channel = int(in_channel * (block_size * block_size))
out_height = int(in_height / block_size)
out_width = int(in_width / block_size)
if layout == "NCHW":
in_shape = [batch, in_channel, in_height, in_width]
out_shape = [batch, out_channel, out_height, out_width]
elif layout == "NHWC":
in_shape = [batch, in_height, in_width, in_channel]
out_shape = [batch, out_height, out_width, out_channel]
else:
raise NotImplementedError("Layout not supported {}".format(layout))
A = te.placeholder(in_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=in_shape).astype(dtype)
B = topi.nn.space_to_depth(A, block_size=block_size, layout=layout)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
b_np = tvm.topi.testing.space_to_depth_python(a_np, block_size)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
def check_device(device, dev):
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for device, dev in tvm.testing.enabled_targets():
check_device(device, dev)
@tvm.testing.uses_gpu
def test_space_to_depth():
for layout in ["NCHW", "NHWC"]:
verify_space_to_depth(2, 1, 1, 2, 2, layout=layout)
verify_space_to_depth(2, 1, 32, 32, 32, layout=layout)
verify_space_to_depth(8, 1, 32, 64, 64, layout=layout)
verify_space_to_depth(4, 8, 32, 32, 32, layout=layout)
verify_space_to_depth(4, 8, 32, 128, 128, layout=layout)
if __name__ == "__main__":
test_space_to_depth()
"""Test code for sparse operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm import relay
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
import tvm.contrib.sparse as tvmsp
from collections import namedtuple
import time
import scipy.sparse as sp
import tvm.testing
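# Per-target (compute, schedule) pairs used to dispatch the BSR sparse_dense tests.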
_sparse_dense_implement = {
"generic": (topi.nn.sparse_dense, topi.generic.schedule_sparse_dense),
"cuda": (topi.cuda.sparse_dense, topi.cuda.schedule_sparse_dense),
"x86": (topi.nn.sparse_dense, topi.x86.schedule_sparse_dense),
}
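# csrmv with dynamically shaped CSR input, checked against a dense NumPy matrix-vector product.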
def verify_dynamic_csrmv(batch, in_dim, out_dim, dtype, use_bias=True):
nr, nc, n = te.var("nr"), te.var("nc"), te.var("n")
A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name="A")
B = te.placeholder((in_dim, 1), dtype=dtype, name="B")
C = te.placeholder((nr,), dtype=dtype, name="C")
D = topi.sparse.csrmv(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
dtype = A.dtype
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_dim), high=100).astype(dtype)
b_np = np.random.uniform(size=(in_dim, 1), high=100).astype(dtype)
c_np = np.random.uniform(size=(batch,), high=100).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
else:
d_np = np.dot(a_np, b_np)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, dev)
_nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
assert a.shape[0] == a.indptr.shape[0] - 1
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros((_nr, 1), dtype=dtype), dev)
assert a.data.dtype == A.data.dtype
assert a.indices.dtype == A.indices.dtype
assert a.indptr.dtype == A.indptr.dtype
f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
f(_nr, a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
for device in ["llvm"]:
check_device(device)
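# csrmm with dynamically shaped CSR input, checked against a dense NumPy matmul.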
def verify_dynamic_csrmm(batch, in_dim, out_dim, dtype, use_bias=True):
nr, nc, n = te.var("nr"), te.var("nc"), te.var("n")
A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name="A")
B = te.placeholder((in_dim, out_dim), dtype=dtype, name="B")
C = te.placeholder((nr,), dtype=dtype, name="C")
D = topi.sparse.csrmm(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
dtype = A.dtype
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_dim), high=100).astype(dtype)
b_np = np.random.uniform(size=(in_dim, out_dim), high=100).astype(dtype)
c_np = np.random.uniform(size=(batch,), high=100).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
else:
d_np = np.dot(a_np, b_np)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, dev)
_nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
assert a.shape[0] == a.indptr.shape[0] - 1
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), dev)
f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
f(_nr, a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-2, atol=1e-2)
for device in ["llvm"]:
check_device(device)
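# topi.sparse.dense with a sparse (CSR) data tensor and a dense weight.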
def verify_dense_si(batch, in_dim, out_dim, use_bias=True, dtype="float32"):
nonzeros = te.var("nonzeros")
A = tvmsp.placeholder(shape=(batch, in_dim), nonzeros=nonzeros, dtype=dtype, name="A")
B = te.placeholder((out_dim, in_dim), dtype=dtype, name="B")
C = te.placeholder((out_dim,), dtype=dtype, name="C")
D = topi.sparse.dense(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
def get_ref_data():
mag = 10.0
a_np = np.maximum(
mag * (np.random.uniform(size=(batch, in_dim)).astype("float32") - 0.5), 0.0
).astype(dtype)
b_np = (mag * (np.random.uniform(size=(out_dim, in_dim)).astype("float32") - 0.5)).astype(
dtype
)
c_np = (mag * (np.random.uniform(size=(out_dim,)).astype("float32") - 0.5)).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np.T) + c_np
else:
d_np = np.dot(a_np, b_np.T)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
f = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense")
f(a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
check_device("llvm")
def verify_dense_sw(batch, in_dim, out_dim, use_bias=True, dtype="float32"):
nonzeros = te.var("nonzeros")
A = te.placeholder((batch, in_dim), dtype=dtype, name="A")
B = tvmsp.placeholder(shape=(out_dim, in_dim), nonzeros=nonzeros, dtype=dtype, name="B")
C = te.placeholder((out_dim,), dtype=dtype, name="C")
D = topi.sparse.dense(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
def get_ref_data():
mag = 10.0
a_np = (mag * (np.random.uniform(size=(batch, in_dim)).astype("float32") - 0.5)).astype(
dtype
)
b_np = np.maximum(
mag * (np.random.uniform(size=(out_dim, in_dim)).astype("float32") - 0.5), 0.0
).astype(dtype)
c_np = (mag * (np.random.uniform(size=(out_dim,)).astype("float32") - 0.5)).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np.T) + c_np
else:
d_np = np.dot(a_np, b_np.T)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvm.nd.array(a_np, dev)
b = tvmsp.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense")
f(a, b.data, b.indices, b.indptr, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
check_device("llvm")
def test_csrmv():
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="float32", use_bias=False)
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="float64", use_bias=True)
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="int32", use_bias=True)
def test_csrmm():
M, K, N = 5, 7, 2
verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, dtype="int64", use_bias=False)
verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, dtype="float64", use_bias=True)
def test_dense_si():
M, K, N = 3, 5, 2
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="float32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="float32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int16")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int16")
def test_dense_sw():
M, K, N = 3, 5, 2
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="float32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="float32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int16")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int16")
def test_dense():
test_dense_si()
test_dense_sw()
def test_sparse_dense_csr():
M, N, K, density = 1, 17, 47, 0.2
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np.dot(W_np.T)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr)
s = te.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(
tvm.nd.array(X_np),
tvm.nd.array(W_sp_np.data),
tvm.nd.array(W_sp_np.indices),
tvm.nd.array(W_sp_np.indptr),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_dense_csr_reverse():
M, N, K, density = 1, 17, 47, 0.2
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32")
W_np = W_sp_np.todense()
Y_np = W_np.dot(X_np.T)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr, sparse_lhs=True)
s = te.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(
tvm.nd.array(X_np),
tvm.nd.array(W_sp_np.data),
tvm.nd.array(W_sp_np.indices),
tvm.nd.array(W_sp_np.indptr),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_transpose_csr():
N, density = 1023, 0.3
X_sp = sp.random(N, N, density=density, format="csr", dtype="float32")
X_sp_T = X_sp.transpose()
X_np_T = X_sp_T.todense()
X_data = te.placeholder(shape=X_sp.data.shape, dtype=str(X_sp.data.dtype))
X_indices = te.placeholder(shape=X_sp.indices.shape, dtype=str(X_sp.indices.dtype))
X_indptr = te.placeholder(shape=X_sp.indptr.shape, dtype=str(X_sp.indptr.dtype))
X_T_data, X_T_indices, X_T_indptr = topi.nn.sparse_transpose(X_data, X_indices, X_indptr)
s = te.create_schedule([X_T_data.op, X_T_indices.op, X_T_indptr.op])
func = tvm.build(s, [X_data, X_indices, X_indptr, X_T_data, X_T_indices, X_T_indptr])
X_T_data_tvm = tvm.nd.array(np.zeros(X_sp_T.data.shape, dtype=X_sp_T.data.dtype))
X_T_indices_tvm = tvm.nd.array(np.zeros(X_sp_T.indices.shape, dtype=X_sp_T.indices.dtype))
X_T_indptr_tvm = tvm.nd.array(np.zeros(X_sp_T.indptr.shape, dtype=X_sp_T.indptr.dtype))
func(
tvm.nd.array(X_sp.data),
tvm.nd.array(X_sp.indices),
tvm.nd.array(X_sp.indptr),
X_T_data_tvm,
X_T_indices_tvm,
X_T_indptr_tvm,
)
X_T_out = sp.csr_matrix(
(X_T_data_tvm.numpy(), X_T_indices_tvm.numpy(), X_T_indptr_tvm.numpy()), shape=(N, N)
).todense()
tvm.testing.assert_allclose(X_np_T, X_T_out, atol=1e-4, rtol=1e-4)
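# Build a random scipy BSR matrix of shape (M, N) with (BS_R, BS_C) blocks at roughly the requested density.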
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype):
import itertools
Y = np.zeros((M, N), dtype=dtype)
assert M % BS_R == 0
assert N % BS_C == 0
nnz = int(density * M * N)
num_blocks = int(nnz / (BS_R * BS_C)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
assert candidate_blocks.shape[0] == M // BS_R * (N // BS_C)
chosen_blocks = candidate_blocks[
np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
]
for i in range(len(chosen_blocks)):
r, c = chosen_blocks[i]
Y[r : r + BS_R, c : c + BS_C] = np.random.randn(BS_R, BS_C)
s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C))
assert s.data.shape == (num_blocks, BS_R, BS_C)
assert s.indices.shape == (num_blocks,)
assert s.indptr.shape == (M // BS_R + 1,)
return s
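# sparse_dense with a BSR weight, dispatched via _sparse_dense_implement; optionally fused with ReLU
# (e.g. test_sparse_dense_bsr_relu below calls this with N=64, K=128, BS_R=8, BS_C=16, density=0.9).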
def verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, use_relu, device, target):
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np @ W_np.T
if use_relu:
Y_np = np.maximum(Y_np, 0.0)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
fcompute, fschedule = tvm.topi.testing.dispatch(target, _sparse_dense_implement)
with tvm.target.Target(target):
Y = fcompute(X, W_data, W_indices, W_indptr)
if use_relu:
Y = topi.nn.relu(Y)
s = fschedule([Y])
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=device)
func(
tvm.nd.array(X_np, device=device),
tvm.nd.array(W_sp_np.data, device=device),
tvm.nd.array(W_sp_np.indices, device=device),
tvm.nd.array(W_sp_np.indptr, device=device),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_sparse_dense_bsr_relu(dev, target):
M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, True, dev, target)
verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, False, dev, target)
def test_sparse_dense_bsr_reverse():
M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = W_np.dot(X_np.T)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr, sparse_lhs=True)
s = te.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(
tvm.nd.array(X_np),
tvm.nd.array(W_sp_np.data),
tvm.nd.array(W_sp_np.indices),
tvm.nd.array(W_sp_np.indptr),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_sparse_dense_bsr_randomized():
for _ in range(20):
BS_R = np.random.randint(1, 16)
BS_C = np.random.randint(1, 16)
M = np.random.randint(1, 32)
N = int(np.random.randint(1, 16) * BS_R)
K = int(np.random.randint(1, 16) * BS_C)
density = np.clip(np.random.random(), 0.1, 0.9)
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = np.array(X_np.dot(W_np.T))
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
with tvm.target.Target(device):
Y = fcompute(X, W_data, W_indices, W_indptr)
s = fschedule([Y])
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=dev)
func(
tvm.nd.array(X_np, device=dev),
tvm.nd.array(W_sp_np.data, device=dev),
tvm.nd.array(W_sp_np.indices, device=dev),
tvm.nd.array(W_sp_np.indptr, device=dev),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_sparse_dense_padded_gpu(target, dev):
M = 128
N = 1280
K = 128
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, 1, 1, density=0.01, dtype="float32")
W_sp_np_padded = tvm.topi.cuda.pad_sparse_matrix(W_sp_np, 32)
W_np = W_sp_np.todense()
Y_np = X_np @ W_sp_np.T
W_data = te.placeholder(shape=W_sp_np_padded.data.shape, dtype=str(W_sp_np_padded.data.dtype))
W_indices = te.placeholder(
shape=W_sp_np_padded.indices.shape, dtype=str(W_sp_np_padded.indices.dtype)
)
W_indptr = te.placeholder(
shape=W_sp_np_padded.indptr.shape, dtype=str(W_sp_np_padded.indptr.dtype)
)
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
with tvm.target.Target(target):
Y = topi.cuda.sparse_dense_padded(X, W_data, W_indices, W_indptr)
s = topi.cuda.schedule_sparse_dense_padded([Y])
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=dev)
func(
tvm.nd.array(X_np, device=dev),
tvm.nd.array(W_sp_np_padded.data, device=dev),
tvm.nd.array(W_sp_np_padded.indices, device=dev),
tvm.nd.array(W_sp_np_padded.indptr, device=dev),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_sparse_dense_padded_alter_op(target, dev):
with tvm.target.Target(target):
M = 128
N = 16
K = 128
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, 2, 2, density=0.01, dtype="float32")
x = relay.var("x", relay.TensorType(X_np.shape, "float32"))
mult = relay.op.nn.sparse_dense(
x,
(
relay.Constant(tvm.nd.array(W_sp_np.data)),
relay.Constant(tvm.nd.array(W_sp_np.indices)),
relay.Constant(tvm.nd.array(W_sp_np.indptr)),
),
)
f = relay.Function([x], mult)
f_ = relay.transform.InferType()(tvm.IRModule.from_expr(f))
f_ = relay.transform.AlterOpLayout()(f_)
assert f_["main"].body.op.name == "nn.internal.sparse_dense_padded"
with tvm.transform.PassContext(opt_level=3, required_pass="AlterOpLayout"):
x = relay.build(tvm.IRModule.from_expr(f), target=target)
def test_sparse_add_csr():
for indices_dtype in ["int32", "int64"]:
for data_dtype in ["float32", "float64"]:
M, K, density = 3, 49, 0.2
X_np = np.random.randn(M, K).astype(data_dtype)
Y_sp_np = sp.random(M, K, density=density, format="csr", dtype=data_dtype)
Y_np = Y_sp_np.todense()
Z_np = X_np + Y_np
Y_data = te.placeholder(shape=Y_sp_np.data.shape, dtype=data_dtype)
Y_indices = te.placeholder(shape=Y_sp_np.indices.shape, dtype=indices_dtype)
Y_indptr = te.placeholder(shape=Y_sp_np.indptr.shape, dtype=indices_dtype)
X = te.placeholder(shape=X_np.shape, dtype=data_dtype)
Z = topi.nn.sparse_add(X, Y_data, Y_indices, Y_indptr)
s = te.create_schedule(Z.op)
func = tvm.build(s, [X, Y_data, Y_indices, Y_indptr, Z])
Z_tvm = tvm.nd.array(np.zeros(Z_np.shape, dtype=Z_np.dtype))
func(
tvm.nd.array(X_np.astype(data_dtype)),
tvm.nd.array(Y_sp_np.data.astype(data_dtype)),
tvm.nd.array(Y_sp_np.indices.astype(indices_dtype)),
tvm.nd.array(Y_sp_np.indptr.astype(indices_dtype)),
Z_tvm,
)
tvm.testing.assert_allclose(Z_tvm.numpy(), Z_np, atol=1e-4, rtol=1e-4)
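# 1x1 sparse conv2d with a BSR kernel, checked against the dense conv2d NumPy reference.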
def verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, layout):
if layout == "NHWC":
X_np = np.random.randn(M, H, W, K).astype("float32")
elif layout == "NCHW":
X_np = np.random.randn(M, K, H, W).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
if layout == "NHWC":
Y_np = tvm.topi.testing.conv2d_nhwc_python(X_np, np.array(W_np).T.reshape(1, 1, K, N), 1, 0)
elif layout == "NCHW":
Y_np = tvm.topi.testing.conv2d_nchw_python(X_np, np.array(W_np).reshape(N, K, 1, 1), 1, 0)
if BS_C == 1:
W_data = te.placeholder(shape=W_sp_np.data.shape[:-1], dtype=str(W_sp_np.data.dtype))
W_sp_np_data = W_sp_np.data.reshape(W_sp_np.data.shape[0], BS_R)
else:
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_sp_np_data = W_sp_np.data
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_conv2d(X, W_data, W_indices, W_indptr, layout)
s = te.create_schedule(Y.op)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype="float32"))
func(
tvm.nd.array(X_np, dev),
tvm.nd.array(W_sp_np_data, dev),
tvm.nd.array(W_sp_np.indices, dev),
tvm.nd.array(W_sp_np.indptr, dev),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np.astype("float32"), atol=1e-4, rtol=1e-4)
check_device("llvm")
def test_sparse_conv2d_bsr():
M, H, W, N, K, BS_R, BS_C, density = 1, 32, 32, 128, 64, 8, 16, 0.9
verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, "NHWC")
verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, "NCHW")
verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, 1, density, "NHWC")
if __name__ == "__main__":
test_sparse_conv2d_bsr()
"""Test code for tensor operator""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.topi.testing
from tvm.contrib.pickle_memoize |
import memoize
from tvm.contrib.nvcc |
import have_fp16 |
import tvm.testing
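# topi.elemwise_sum over num_args same-shape tensors, checked against np.sum.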
def verify_elemwise_sum(num_args, dtype):
shape = (3, 5, 4)
tvm_placeholders = []
for i in range(num_args):
tvm_placeholders.append(te.placeholder(shape, name="data" + str(i), dtype=dtype))
esum = topi.elemwise_sum(tvm_placeholders)
s = te.create_schedule([esum.op])
@memoize("topi.tests.test_topi_elemwise_sum")
def get_ref_data():
np_nd = [np.random.uniform(0, 10, size=shape).astype(dtype) for i in range(num_args)]
return np_nd
np_nd = get_ref_data()
def check_target(target):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
dev = tvm.device(target, 0)
out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
f = tvm.build(s, tvm_placeholders + [esum], target, name="elemwise_sum")
tvm_nd = [tvm.nd.array(nd, dev) for nd in np_nd] + [out]
f(*tvm_nd)
np_out = np.sum(np.array(np_nd), axis=0)
tvm.testing.assert_allclose(out.numpy(), np_out, rtol=1e-5)
for target in ["llvm"]:
check_target(target)
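# topi.full / topi.full_like: fill a tensor of the given shape and dtype with a constant value.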
def verify_full(shape, dtype, fill_value):
A = te.placeholder(shape, dtype=dtype, name="A")
B = topi.full_like(A, fill_value=fill_value)
C = topi.full(shape=shape, dtype=dtype, fill_value=fill_value)
s1 = te.create_schedule([B.op])
s2 = te.create_schedule([C.op])
@memoize("topi.tests.test_topi_full")
def get_ref_data():
return np.full(shape, fill_value, dtype)
np_nd = get_ref_data()
def check_target(target):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
dev = tvm.device(target, 0)
out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
f = tvm.build(s1, [A, B], target, name="full_like")
f(tvm.nd.array(np.zeros(shape, dtype), dev), out)
tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5)
f = tvm.build(s2, [C], target, name="full")
f(out)
tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5)
for target in ["llvm"]:
check_target(target)
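# Element-wise add-one on the GPU via the vectorized elementwise schedule (skipped for fp16 if unsupported).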
def verify_vectorization(n, m, dtype):
def check_targeta(targeta):
if not tvm.testing.device_enabled(targeta):
print("Skip because %s is not enabled" % targeta)
return
if dtype == "float16" and targeta == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
with tvm.target.Target(targeta):
dev = tvm.device(targeta, 0)
A = te.placeholder((n, m), name="A", dtype=dtype)
B = te.compute((n, m), lambda i, j: A[i, j] + tvm.tir.const(1, A.dtype), name="B")
S = tvm.topi.testing.get_elemwise_schedule(targeta)(B)
fun = tvm.build(S, [A, B], targeta)
np_A = tvm.nd.empty((n, m), A.dtype, dev).copyfrom(np.random.uniform(size=(n, m)))
np_B = tvm.nd.empty((n, m), B.dtype, dev)
fun(np_A, np_B)
tvm.testing.assert_allclose(np_B.numpy(), np_A.numpy() + 1, rtol=1e-5)
for targeta in ["cuda"]:
check_targeta(targeta)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorization():
verify_vectorization(128, 64, "float16")
def test_elemwise_sum():
verify_elemwise_sum(1, "float32")
verify_elemwise_sum(5, "float32")
verify_elemwise_sum(4, "int32")
def test_full():
verify_full((3, 4, 5), "float32", 3.14)
verify_full((10,), "int32", 7)
if __name__ == "__main__":
test_elemwise_sum()
test_full()
test_vectorization()
"""Test code for broadcasting operators."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm import relay
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
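# topi.expand_dims: insert num_newaxis new axes at the given position and compare against a NumPy reshape.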
def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
A = te.placeholder(shape=in_shape, name="A")
B = topi.expand_dims(A, axis, num_newaxis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="expand_dims")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = data_npy.reshape(out_shape)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_reinterpret(in_shape, in_dtype, out_dtype, generator):
A = te.placeholder(shape=in_shape, name="A", dtype=in_dtype)
B = topi.reinterpret(A, out_dtype)
def check_device(target, dev):
if in_dtype == "float16" and target == "cuda" and not have_fp16(dev.compute_version):
print("Skip because %s does not have fp16 support" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_elemwise_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="reinterpret")
data_npy = generator(in_shape).astype(in_dtype)
out_npy = data_npy.view(B.dtype)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(in_shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
np.testing.assert_equal(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_transpose(in_shape, axes):
A = te.placeholder(shape=in_shape, name="A")
B = topi.transpose(A, axes)
def check_device(target, dev):
print("Running on target: %s" % targe |
t)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="transpose")
data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
out_npy = data_npy.transpose(axes)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_reshape(src_shape, dst_shape):
A = te.placeholder(shape=src_shape, name="A")
B = topi.reshape(A, dst_shape)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="reshape")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.reshape(data_npy, newshape=dst_shape)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_squeeze(src_shape, axis):
A = te.placeholder(shape=src_shape, name="A")
B = topi.squeeze(A, axis=axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="squeeze")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.squeeze(data_npy, axis=axis)
data_nd = tvm.nd.array(data_npy, dev)
out_nd_shape = out_npy.shape
out_nd = tvm.nd.empty(out_nd_shape, device=dev, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_concatenate(shapes, axis):
def get_concat_schedule(target):
schedule_map = {
"cpu": topi.x86.schedule_concatenate,
"arm_cpu": topi.arm_cpu.schedule_concatenate,
}
if isinstance(target, str):
target = tvm.target.Target(target)
for key in target.keys:
if key in schedule_map:
return schedule_map[key]
return tvm.topi.testing.get_injective_schedule(target)
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(te.placeholder(shape, name="A" + str(i)))
out_tensor = topi.concatenate(a_tuple=tensor_l, axis=axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = get_concat_schedule(target)(out_tensor)
foo = tvm.build(s, tensor_l + [out_tensor], target, name="concatenate")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
out_npy = np.concatenate(data_npys, axis=axis)
data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys]
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype)
foo(*(data_nds + [out_nd]))
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_stack(shapes, axis):
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(te.placeholder(shape, name="A" + str(i)))
out_tensor = topi.stack(tensor_l, axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(out_tensor)
foo = tvm.build(s, tensor_l + [out_tensor], target, name="stack")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
out_npy = np.stack(data_npys, axis=axis)
data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys]
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype)
foo(*(data_nds + [out_nd]))
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_split(src_shape, indices_or_sections, axis):
A = te.placeholder(shape=src_shape, name="A")
tensor_l = topi.split(A, indices_or_sections, axis=axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(tensor_l)
foo = tvm.build(s, [A] + list(tensor_l), target, name="split")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npys = np.split(data_npy, indices_or_sections, axis=axis)
data_nd = tvm.nd.array(data_npy, dev)
out_nds = [
tvm.nd.empty(out_npy.shape, device=dev, dtype=tensor_l[0].dtype) for out_npy in out_npys
]
foo(*([data_nd] + out_nds))
for out_nd, out_npy in zip(out_nds, out_npys):
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_expand_like(in_shape, out_shape, axis):
A = te.placeholder(shape=in_shape, name="A")
B = te.placeholder(shape=out_shape, name="B")
C = topi.expand_like(A, B, axis)
s = te.create_schedule([C.op])
def check_device(target):
print("Running on target: %s" % target)
dev = tvm.device(target, 0)
f = tvm.build(s, [A, B, C], target, name="expand_like")
input = np.random.uniform(size=in_shape).astype(A.dtype)
tvm_input = tvm.nd.array(input, dev)
odim = len(out_shape)
real_axis = [x if x >= 0 else x + odim for x in axis]
real_axis = sorted(real_axis)
for x in real_axis:
input = np.expand_dims(input, x).astype(A.dtype)
for x in real_axis:
input = np.concatenate([input] * out_shape[x], axis=x).astype(A.dtype)
assert input.shape == out_shape
tvm_shape_like = tvm.nd.array(np.zeros(out_shape).astype(B.dtype), dev)
out = tvm.nd.array(np.zeros(out_shape).astype(A.dtype), dev)
f(tvm_input, tvm_shape_like, out)
tvm.testing.assert_allclose(out.numpy(), input)
for target in ["llvm"]:
check_device(target)
def verify_flip(in_shape, axis):
A = te.placeholder(shape=in_shape, name="A")
B = topi.flip(A, axis) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="reverse")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.flip(x_np, axis) + 1
data_nd = tvm.nd.array(x_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "cuda", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
@tvm.testing.uses_gpu
def test_reverse_sequence():
def verify_reverse_sequence(in_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths = np.array(seq_lengths).astype("int32")
A = te.placeholder(shape=in_data.shape, name="A", dtype=str(in_data.dtype))
B = te.placeholder(shape=seq_lengths.shape, name="B", dtype=str(seq_lengths.dtype))
C = topi.reverse_sequence(A, B, seq_axis, batch_axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(C)
foo = tvm.build(s, [A, B, C], target, name="reverse_sequence")
data_nd = tvm.nd.array(in_data, dev)
seq_lengths_nd = tvm.nd.array(seq_lengths, dev)
out_nd = tvm.nd.empty(in_data.shape, device=dev, dtype=A.dtype)
foo(data_nd, seq_lengths_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), ref_res)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
],
[
[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]],
],
]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]],
],
[
[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]],
],
]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert (
"For reverse_sequnece seq_lengths size should match with dimension of batch axis,"
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
)
def verify_take(src_shape, indices_src, axis=None, mode="clip", indices_dtype="int32"):
src_dtype = "float32"
indices_src = np.array(indices_src, dtype=indices_dtype)
A = te.placeholder(shape=src_shape, dtype=src_dtype, name="A")
indices = te.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
if axis is None:
out_tensor = topi.take(a=A, indices=indices, mode=mode)
else:
out_tensor = topi.take(a=A, indices=indices, axis=axis, mode=mode)
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
foo = tvm.build(s, [A] + [indices] + [out_tensor], target, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
if axis is None:
np_mode = "raise" if mode == "fast" else mode
out_npys = np.take(data_npy, indices_src, mode=np_mode)
else:
np_mode = "raise" if mode == "fast" else mode
out_npys = np.take(data_npy, indices_src, axis=axis, mode=np_mode)
data_nd = tvm.nd.array(data_npy, dev)
indices_nd = tvm.nd.array(indices_src, dev)
out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype)
foo(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_strided_slice(in_shape, begin, end, strides=None, axes=None):
A = te.placeholder(shape=in_shape, name="A")
strides = [1, 1, 1] if strides is None else strides
if axes:
strides = [strides[axis] for axis in axes]
B = topi.strided_slice(A, begin, end, strides, axes) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="stride_slice")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = tvm.topi.testing.strided_slice_python(x_np, begin, end, strides, axes=axes) + 1
data_nd = tvm.nd.array(x_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_dynamic_strided_slice(in_shape, begin, end, strides=None):
A = te.placeholder(shape=in_shape, name="A")
Begin = te.placeholder(shape=[len(in_shape)], name="begin", dtype="int64")
End = te.placeholder(shape=[len(in_shape)], name="end", dtype="int64")
Strides = te.placeholder(shape=[len(in_shape)], name="strides", dtype="int64")
strides = [1, 1, 1] if strides is None else strides
B = topi.strided_slice(A, Begin, End, Strides) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, Begin, End, Strides, B], target, name="stride_slice")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = tvm.topi.testing.strided_slice_python(x_np, begin, end, strides) + 1
data_nd = tvm.nd.array(x_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
begin_nd = tvm.nd.array(np.array(begin).astype("int64"), dev)
end_nd = tvm.nd.array(np.array(end).astype("int64"), dev)
strides_nd = tvm.nd.array(np.array(strides).astype("int64"), dev)
foo(data_nd, begin_nd, end_nd, strides_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_strided_set(in_shape, v_shape, begin, end, strides=None):
A = te.placeholder(shape=in_shape, name="A")
V = te.placeholder(shape=v_shape, name="V")
b = te.placeholder(shape=(len(begin),), name="b", dtype="int32")
e = te.placeholder(shape=(len(end),), name="e", dtype="int32")
if strides is not None:
st = te.placeholder(shape=(len(strides),), name="st", dtype="int32")
B = topi.strided_set(A, V, b, e, st) + 1
else:
B = topi.strided_set(A, V, b, e) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
if strides is not None:
foo = tvm.build(s, [A, V, b, e, st, B], target, name="stride_set")
s_np = np.asarray(strides).astype("int32")
s_nd = tvm.nd.array(s_np, dev)
else:
foo = tvm.build(s, [A, V, b, e, B], target, name="stride_set")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
v_np = np.random.uniform(size=v_shape).astype(V.dtype)
b_np = np.asarray(begin).astype("int32")
e_np = np.asarray(end).astype("int32")
out_npy = tvm.topi.testing.strided_set_python(x_np, v_np, begin, end, strides) + 1
data_nd = tvm.nd.array(x_np, dev)
v_nd = tvm.nd.array(v_np, dev)
b_nd = tvm.nd.array(b_np, dev)
e_nd = tvm.nd.array(e_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
if strides is not None:
foo(data_nd, v_nd, b_nd, e_nd, s_nd, out_nd)
else:
foo(data_nd, v_nd, b_nd, e_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_gather(data, axis, indices):
data = np.asarray(data)
indices = np.asarray(indices)
var_data = te.placeholder(shape=data.shape, dtype=data.dtype.name, name="data")
var_indices = te.placeholder(shape=indices.shape, dtype=indices.dtype.name, name="indices")
out_tensor = topi.gather(var_data, axis, var_indices)
def check_device(target, dev):
print("Running on |
target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
func = tvm.build(s, [var_data, var_indices, out_tensor], target, name="gather")
out_npys = tvm.topi.testing.gather_python(data, axis, indices)
data_nd = tvm.nd.array(data, dev)
indices_nd = tvm.nd.array(indices, dev)
out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=data.dtype.name)
func(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_gather_nd(src_shape, indices_src, indices_dtype):
src_dtype = "float32"
indices_src = np.array(indices_src, dtype=indices_dtype)
A = te.placeholder(shape=src_shape, dtype=src_dtype, name="A")
indices = te.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
out_tensor = topi.gather_nd(a=A, indices=indices)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
func = tvm.build(s, [A, indices, out_tensor], target, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
out_npys = tvm.topi.testing.gather_nd_python(data_npy, indices_src)
data_nd = tvm.nd.array(data_npy, dev)
indices_nd = tvm.nd.array(indices_src, dev)
out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype)
func(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_arange(start, stop, step):
if start is None and step is None:
A = topi.arange(stop)
a_np = np.arange(stop)
elif start is None:
A = topi.arange(stop, step=step)
a_np = np.arange(stop, step=step)
elif step is None:
A = topi.arange(start, stop)
a_np = np.arange(start, stop)
else:
A = topi.arange(start, stop, step)
a_np = np.arange(start, stop, step)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(A)
f = tvm.build(s, [A], target, name="arange")
a_nd = tvm.nd.empty(a_np.shape, dtype="float32", device=dev)
f(a_nd)
tvm.testing.assert_allclose(a_nd.numpy(), a_np)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_repeat(in_shape, repeats, axis):
A = te.placeholder(shape=in_shape, name="A")
B = topi.repeat(A, repeats, axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="repeat")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.repeat(data_npy, repeats, axis)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_tile(in_shape, reps):
A = te.placeholder(shape=in_shape, name="A")
B = topi.tile(A, reps)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="tile")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.tile(data_npy, reps)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_where(in_shape):
Cond = te.placeholder(shape=in_shape, name="cond")
dtype = Cond.dtype
A = te.placeholder(shape=in_shape, name="A")
B = te.placeholder(shape=in_shape, name="B")
C = topi.where(Cond, A, B)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(C)
f = tvm.build(s, [Cond, A, B, C], target, name="where")
cond_npy = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
x_npy = np.random.uniform(size=in_shape).astype(dtype)
y_npy = np.random.uniform(size=in_shape).astype(dtype)
out_npy = np.where(cond_npy, x_npy, y_npy)
cond_nd = tvm.nd.array(cond_npy, dev)
x_nd = tvm.nd.array(x_npy, dev)
y_nd = tvm.nd.array(y_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
f(cond_nd, x_nd, y_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_one_hot(indices_shape, depth, on_value, off_value, axis, dtype):
indices = te.placeholder(shape=indices_shape, name="indices", dtype="int32")
on_value_const = tvm.tir.const(on_value, dtype)
off_value_const = tvm.tir.const(off_value, dtype)
one_hot_result = topi.transform.one_hot(
indices, on_value_const, off_value_const, depth, axis, dtype
)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(one_hot_result)
fn = tvm.build(s, [indices, one_hot_result], target, name="one_hot")
indices_npy = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype)
out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
indices_nd = tvm.nd.array(indices_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(one_hot_result.dtype), dev)
fn(indices_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_unravel_index(indices, shape, dtype, indice_dtype="int64"):
x_data = np.array(indices).astype(indice_dtype)
y_data = np.array(shape).astype(dtype)
if len(x_data.shape) == 1:
dst_shape = [y_data.shape[0], x_data.shape[0]]
else:
dst_shape = [y_data.shape[0]]
X = te.placeholder(shape=x_data.shape, dtype=indice_dtype, name="X")
Y = te.placeholder(shape=y_data.shape, dtype=dtype, name="Y")
Z = topi.unravel_index(X, Y)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(Z)
foo = tvm.build(s, [X, Y, Z], target, name="unravel_index")
out_npy = np.unravel_index(x_data, y_data)
datax_nd = tvm.nd.array(x_data, dev)
datay_nd = tvm.nd.array(y_data, dev)
out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=Z.dtype)
foo(datax_nd, datay_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
output_shape_data = np.array(output_shape)
default_value_data = np.array(default_value)
A = te.placeholder(
shape=sparse_indices_data.shape, name="sparse_indices", dtype=str(sparse_indices_data.dtype)
)
B = te.placeholder(
shape=sparse_values_data.shape, name="sparse_values", dtype=str(sparse_values_data.dtype)
)
if default_value is None:
args = [A, B]
D = topi.sparse_to_dense(A, output_shape, B)
else:
C = te.placeholder(shape=(), name="default_value", dtype=str(default_value_data.dtype))
args = [A, B, C]
D = topi.sparse_to_dense(A, output_shape, B, C)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(D)
foo = tvm.build(s, args + [D], target, name="sparse_to_dense")
sparse_indices_nd = tvm.nd.array(sparse_indices_data, dev)
sparse_values_nd = tvm.nd.array(sparse_values_data, dev)
out_nd = tvm.nd.empty(output_shape_data, device=dev, dtype=B.dtype)
if default_value is None:
foo(sparse_indices_nd, sparse_values_nd, out_nd)
else:
default_value_nd = tvm.nd.array(default_value_data, dev)
foo(sparse_indices_nd, sparse_values_nd, default_value_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), np.array(xpected))
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_matrix_set_diag(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"):
input = te.placeholder(shape=input_shape, name="input", dtype=dtype)
diagonal = te.placeholder(shape=diagonal_shape, name="diagonal", dtype=dtype)
matrix_set_diag_result = topi.transform.matrix_set_diag(input, diagonal, k, align)
def check_device(target, dev):
dev = tvm.device(target, 0)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(matrix_set_diag_result)
fn = tvm.build(s, [input, diagonal, matrix_set_diag_result], target, name="matrix_set_diag")
input_npy = np.random.randint(-100, 100, size=input_shape).astype(dtype)
diagonal_npy = np.random.randint(-100, 100, size=diagonal_shape).astype(dtype)
out_npy = tvm.topi.testing.matrix_set_diag(input_npy, diagonal_npy, k, align)
input_nd = tvm.nd.array(input_npy, dev)
diagonal_nd = tvm.nd.array(diagonal_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(matrix_set_diag_result.dtype), dev)
fn(input_nd, diagonal_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_adv_index(data_shape, index_shapes, indice_dtype="int64"):
dtype = "float32"
data = te.placeholder(shape=data_shape, name="data", dtype=dtype)
indices = []
np_data = np.random.uniform(size=data_shape).astype(dtype)
np_indices = []
for i, index_shape in enumerate(index_shapes):
limit = data_shape[i]
np_indices.append(np.random.uniform(0, limit - 1, size=index_shape).astype(indice_dtype))
indices.append(
te.placeholder(shape=index_shape, name="index_{}".format(i), dtype=indice_dtype)
)
np_out = np_data[tuple(np_indices)]
out = topi.adv_index(data, indices)
def check_device(target, dev):
dev = tvm.device(target, 0)
if not dev.exist:
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
func = tvm.build(s, [data] + indices + [out], target, name="adv_index")
nd_list = [tvm.nd.array(np_data, dev)]
for np_index in np_indices:
nd_list.append(tvm.nd.array(np_index, dev))
nd_list.append(tvm.nd.empty(out.shape, device=dev, dtype=data.dtype))
func(*nd_list)
tvm.testing.assert_allclose(nd_list[-1].numpy(), np.array(np_out))
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_trilu(input_shape, upper, k=0):
x = te.placeholder(shape=input_shape, name="x", dtype="float32")
k_tir = tvm.tir.const(k, dtype="int32")
trilu_result = topi.transform.trilu(x, k_tir, upper)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(trilu_result)
fn = tvm.build(s, [x, trilu_result], target, name="trilu")
x_npy = np.random.normal(size=input_shape).astype(x.dtype)
if upper:
out_npy = np.triu(x_npy, k)
else:
out_npy = np.tril(x_npy, k)
x_nd = tvm.nd.array(x_npy, dev)
out_nd = tvm.nd.array(np.empty(x_npy.shape).astype(trilu_result.dtype), dev)
fn(x_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
@tvm.testing.uses_gpu
def test_strided_slice():
verify_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
verify_strided_slice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
verify_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
verify_strided_slice((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
verify_strided_slice((3, 4, 3), [0, 2, 0], [1, 2, 3])
verify_strided_slice((3, 4, 3), [0, 0, 0], [None, None, None])
verify_strided_slice((3, 4, 3), [0], [2], None, axes=[1])
@tvm.testing.uses_gpu
def test_dynamic_strided_slice():
verify_dynamic_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify_dynamic_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
verify_dynamic_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
verify_dynamic_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
verify_dynamic_strided_slice((3, 4, 3), [0, 2, 0], [1, 2, 3])
@tvm.testing.uses_gpu
def test_strided_set():
verify_strided_set((3, 4, 3), (3, 2, 2), [0, 3, 0], [4, 1, 4], [1, -1, 2])
verify_strided_set((3, 4, 3), (3, 1, 2), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify_strided_set((3, 4, 3), (1, 3, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
verify_strided_set((3, 4, 3), (1, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
verify_strided_set((3, 4, 3), (1, 2, 2), [1, 0, 0], [2, 2, 3], [1, 1, 2])
verify_strided_set((3, 4, 3), (1, 2, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
verify_strided_set((3, 4, 3), (1, 2, 3), [1, 1, 0], [2, 3, 3], [1])
verify_strided_set((3, 4, 3), (2, 3, 3), [1, 1, 0], [4, 4, 3])
verify_strided_set((3, 4, 3), (2, 3, 3), [1, 1], [4, 4, 3])
@tvm.testing.uses_gpu
def test_expand_dims():
verify_expand_dims((3, 10), (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_reinterpret():
verify_reinterpret((1000,), "float32", "int32", lambda shape: np.random.randn(*shape) * 1000)
verify_reinterpret((1000,), "float16", "int16", lambda shape: np.random.randn(*shape) * 100)
verify_reinterpret(
(1000,), "int16", "uint16", lambda shape: np.random.randint(-1000, 1000, size=shape)
)
verify_reinterpret(
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
)
verify_reinterpret(
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
)
@tvm.testing.uses_gpu
def test_transpose():
verify_transpose((3, 10, 2), (1, 0, 2))
verify_transpose((3, 10, 5), (2, 0, 1))
verify_transpose((3, 10), None)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_transpose_unfused_schedule(target, dev):
shape = (100, tvm.target.Target(target).thread_warp_size + 3)
x = relay.var("x", relay.TensorType(shape, "float32"))
f = relay.transpose(x)
r = np.random.rand(*shape)
func = relay.create_executor(
kind="graph", mod=tvm.IRModule.from_expr(relay.Function([x], f)), device=dev, target=target
).evaluate()
tvm.testing.assert_allclose(func(r).numpy(), np.transpose(r))
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(shape, "float32"))
f = relay.transpose(x + y)
func = relay.create_executor(
kind="graph",
mod=tvm.IRModule.from_expr(relay.Function([x, y], f)),
device=dev,
target=target,
).evaluate()
tvm.testing.assert_allclose(func(r, r).numpy(), np.transpose(r + r))
@tvm.testing.uses_gpu
def test_reshape():
verify_reshape((1, 2, 3, 4), (2, 3, 4))
verify_reshape((4, 2, 3, 4), (2, 4, 12))
verify_reshape((4, 2, 3, 4), (2, 48))
verify_reshape((16,), (2, 2, 2, 2))
verify_reshape((4, 0), (2, 0, 2))
@tvm.testing.uses_gpu
def test_where():
verify_where(())
verify_where((1, 2, 3, 4))
@tvm.testing.uses_gpu
def test_squeeze():
verify_squeeze((1, 2, 3, 4), 0)
verify_squeeze((1, 2, 1, 4), None)
verify_squeeze((1, 1, 1, 4), (1, 2))
verify_squeeze((1, 1, 1, 1), None)
verify_squeeze((1, 1, 1, 1), ())
A = te.placeholder((2,), "float32", "A")
E = topi.squeeze(A)
C = te.compute((1,), lambda i: E[(2 * A[0] - 1).astype("int32")])
for target in ["llvm", "cuda", "opencl"]:
dev = tvm.device(target, 0)
if tvm.testing.device_enabled(target):
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(C)
func = tvm.build(s, [A, C])
a = tvm.nd.array(np.array((1, 2)).astype("float32"), device=dev)
c = tvm.nd.empty((1,), dtype="float32", device=dev)
func(a, c)
assert c.numpy()[0] == 2
@tvm.testing.uses_gpu
def test_concatenate():
verify_concatenate([(2,), (2,), (2,)], -1)
verify_concatenate([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)
verify_concatenate([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)
verify_concatenate([(5, 6, 7, 3), (16, 6, 7, 3), (12, 6, 7, 3), (8, 6, 7, 3), (2, 6, 7, 3)], 0)
verify_concatenate([(1, 14400), (1, 2400), (1, 640), (1, 240)], 1)
@tvm.testing.uses_gpu
def test_stack():
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
@tvm.testing.uses_gpu
def test_split():
verify_split((2, 12, 3), 3, 1)
verify_split((2, 12, 3), [2, 4], 1)
verify_split((10, 12, 24), [5, 7, 9], -1)
@tvm.testing.uses_gpu
def test_flip():
verify_flip((3, 4, 3), 1)
verify_flip((3, 4, 3), 0)
verify_flip((3, 4, 3), 2)
verify_flip((3, 4, 3), -1)
verify_flip((3, 4, 3), -3)
verify_flip((3, 4, 3), -2)
@tvm.testing.requires_llvm
def test_expand_like():
verify_expand_like((3,), (2, 3), [0])
verify_expand_like((2,), (2, 3), [1])
verify_expand_like((3, 4), (3, 5, 4), [1])
verify_expand_like((5, 7), (5, 6, 7, 8), [1, 3])
@tvm.testing.uses_gpu
def test_take():
verify_take((4,), [1])
verify_take((4,), [[0, 1, 2, 3]])
verify_take((3, 3, 3), [[11, 25]])
verify_take((4,), [[0, 1], [2, 3]])
verify_take((4,), [1], 0)
verify_take((2, 2), [[[1, 0], [0, 1]]], 0)
verify_take((2, 2), [[[1, 0], [0, 1]]], 1)
verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2)
verify_take((3, 4), [-5, 20])
verify_take((3, 4), [-5, 20], mode="wrap")
verify_take((3, 4), [-1, 2], axis=0)
verify_take((3, 4), [-1, 2], axis=0, mode="wrap")
verify_take((3, 4), [-1, 2], axis=1)
verify_take((3, 4), [-1, 2], axis=1, mode="wrap")
verify_take((3, 3, 3), [[11, 25]], mode="fast")
verify_take((3, 4), [0, 2], axis=0, mode="fast")
verify_take((3, 4), [0, 2], axis=1, mode="fast")
verify_take((3, 4), [1, 2], axis=1, indices_dtype="uint32")
verify_take((3, 4), [1, 2], axis=1, mode="wrap", indices_dtype="uint16")
verify_take((3, 3, 3), [[11, 20]], mode="fast", indices_dtype="uint8")
@tvm.testing.uses_gpu
def test_gather():
verify_gather([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]])
verify_gather(np.random.randn(4, 7, 5), 0, np.random.randint(low=0, high=4, size=(1, 7, 5)))
verify_gather(np.random.randn(4, 7, 5), 0, np.random.randint(low=0, high=4, size=(4, 7, 5)))
verify_gather(np.random.randn(4, 7, 5), 1, np.random.randint(low=0, high=7, size=(4, 10, 5)))
verify_gather(np.random.randn(4, 7, 5), 1, np.random.randint(low=0, high=7, size=(4, 10, 5)))
verify_gather(np.random.randn(4, 7, 5), 2, np.random.randint(low=0, high=5, size=(4, 7, 2)))
verify_gather(np.random.randn(4, 7, 5), 2, np.random.randint(low=0, high=5, size=(4, 7, 10)))
@tvm.testing.uses_gpu
def test_gather_nd():
for indices_dtype in ["int32", "float32", "uint8"]:
verify_gather_nd((4,), [[1.8]], indices_dtype)
verify_gather_nd((4,), [[1, 3, 2]], indices_dtype)
verify_gather_nd((2, 3), [[1]], indices_dtype)
verify_gather_nd((2, 3), [[1], [0]], indices_dtype)
verify_gather_nd((2, 3), [[1, 0], [0, 2]], indices_dtype)
verify_gather_nd((2, 3, 4), [[1, 0], [0, 2]], indices_dtype)
verify_gather_nd((2, 3, 4), [[1, 0], [0, 2], [3, 1]], indices_dtype)
verify_gather_nd(
(2, 3, 4), [[[1, 0], [0, 1]], [[0, 2], [1, 2]], [[3, 1], [0, 2]]], indices_dtype
)
verify_gather_nd((2, 3, 4, 5), [[1, 0], [0, 2]], indices_dtype)
verify_gather_nd((2, 3, 4, 5), [[1, 0], [2, 1], [3, 2], [4, 2]], indices_dtype)
@tvm.testing.uses_gpu
def test_arange():
verify_arange(None, 20, None)
verify_arange(None, 20, 2)
verify_arange(1, 20, None)
verify_arange(1, 20, 2)
verify_arange(1, 20, 1.5)
verify_arange(1, 20.5, None)
verify_arange(1, 20, 3)
verify_arange(20, 1, -1)
verify_arange(20, 1, -1.5)
@tvm.testing.uses_gpu
def test_repeat():
verify_repeat((2,), 1, 0)
verify_repeat((3, 2), 2, 0)
verify_repeat((3, 2, 4), 3, 1)
verify_repeat((1, 3, 2, 4), 4, -1)
@tvm.testing.uses_gpu
def test_tile():
verify_tile((3, 2), (2, 3))
verify_tile((3, 2, 5), (2,))
verify_tile((3,), (2, 3, 3))
verify_tile((4, 0), (5,))
@tvm.testing.uses_gpu
def test_layout_transform():
in_shape = (1, 32, 8, 8)
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = topi.layout_transform(A, "NCHW", "NCHW16c")
input = np.random.uniform(size=in_shape).astype(A.dtype)
output = np.transpose(input, axes=(0, 2, 3, 1))
output = np.reshape(output, newshape=(1, 8, 8, 2, 16))
output = np.transpose(output, axes=(0, 3, 1, 2, 4))
def check_device(target, dev):
tvm_input = tvm.nd.array(input, dev)
tvm_output = tvm.nd.empty(output.shape, device=dev, dtype=B.dtype)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
f = tvm.build(s, [A, B], target, name="layout_transform")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.numpy(), output)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_shape():
in_shape = (8, 7, 13)
dtype = "int32"
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = topi.shape(A, dtype)
input = np.random.uniform(size=in_shape).astype(A.dtype)
output = np.asarray(in_shape).astype(dtype)
def check_device(target, dev):
tvm_input = tvm.nd.array(input, dev)
tvm_output = tvm.nd.empty(output.shape, device=dev, dtype=dtype)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
f = tvm.build(s, [A, B], target, name="shape")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.numpy(), output)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
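# sequence_mask keeps the first B[i] elements of each sequence along `axis` and
# overwrites the rest with mask_value; tvm.topi.testing.sequence_mask provides the
# reference result.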
@tvm.testing.uses_gpu
def test_sequence_mask():
for in_shape in (5, 10), (3, 4, 5, 4):
for axis in [0, 1]:
for mask_value in [0.0, 1.0]:
max_length = in_shape[axis]
batch_size = in_shape[1 - axis]
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = te.placeholder(shape=(batch_size,), dtype="int32", name="B")
C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
A_data = np.random.normal(0, 1, in_shape).astype(np.float32)
B_data = np.random.randint(1, max_length, (batch_size,)).astype(np.int32)
C_gt_data = tvm.topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
def check_device(target, dev):
tvm_A = tvm.nd.array(A_data, dev)
tvm_B = tvm.nd.array(B_data, dev)
tvm_C = tvm.nd.empty(in_shape, device=dev, dtype="float32")
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(C)
f = tvm.build(s, [A, B, C], target, name="SequenceMask")
f(tvm_A, tvm_B, tvm_C)
tvm.testing.assert_allclose(tvm_C.numpy(), C_gt_data)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_ndarray_size():
in_shape = (5, 11, 7)
dtype = "int32"
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = topi.ndarray_size(A, dtype)
input = np.random.uniform(size=in_shape).astype(A.dtype)
output = np.asarray(np.size(input)).astype(dtype)
def check_device(target, dev):
tvm_input = tvm.nd.array(input, device=dev)
tvm_output = tvm.nd.empty((), device=dev, dtype=B.dtype)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
f = tvm.build(s, [A, B], target, name="ndarray_size")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.numpy(), output)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
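# test_where_fusion only checks that conv2d -> greater_equal -> where -> add can be
# scheduled and compiled together; no numerical output is compared.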
@tvm.testing.uses_gpu
def test_where_fusion():
"""integration test that where and zeros should be properly inlined"""
def check_device(target, dev):
with tvm.target.Target(target):
print("Running on target: %s" % target)
conv2d_compute, conv2d_schedule = tvm.topi.testing.get_conv2d_nchw_implement(target)
data = te.placeholder((2, 1, 2, 4), "int8", "data")
w = te.placeholder((3, 1, 2, 2), "int8", "w")
conv1 = conv2d_compute(data, w, 1, 0, 1, "int32")
zeros = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(0, dtype="int32"))
gt = topi.greater_equal(conv1, zeros)
one = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(1, dtype="int32"))
two = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(2, dtype="int32"))
where = topi.where(gt, one, two)
add = topi.add(conv1, where)
outs = [add]
s = conv2d_schedule(outs)
tvm.build(s, [data, w, add], target=target)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_one_hot():
verify_one_hot((3,), 3, 1, 0, -1, "int32")
verify_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
verify_one_hot((2, 2), 5, 2, -2, 0, "int32")
verify_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
verify_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
verify_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_unravel_index():
for dtype in ["int32", "int64"]:
for indice_dtype in ["int64", "uin |
t8", "uint16", "uint32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype, indice_dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype, indice_dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype, indice_dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype, indice_dtype)
@tvm.testing.uses_gpu
def test_sparse_to_dense():
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
)
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
)
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
)
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
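# matrix_set_diag writes `diagonal` onto the band of diagonals selected by k
# (a single offset or a (k_lower, k_upper) range); `align` controls how the rows of
# `diagonal` are aligned when k is a range.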
@tvm.testing.uses_gpu
def test_matrix_set_diag():
for dtype in ["float32", "int32"]:
verify_matrix_set_diag((2, 2), (2,), dtype)
verify_matrix_set_diag((4, 3, 3), (4, 3), dtype)
verify_matrix_set_diag((2, 3, 4), (2, 3), dtype, 1)
verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "LEFT_RIGHT")
verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "LEFT_LEFT")
verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "RIGHT_RIGHT")
@tvm.testing.uses_gpu
def test_adv_index():
for indice_dtype in ["int32", "int64", "uint8", "uint16", "uint32"]:
verify_adv_index((3, 4, 5), [(2,), (2,), (1,)], indice_dtype=indice_dtype)
verify_adv_index((10, 15, 5), [(4, 1), (1, 7)], indice_dtype=indice_dtype)
verify_adv_index((10, 5, 15), [(1, 2, 1), (1, 2, 7)], indice_dtype=indice_dtype)
@tvm.testing.uses_gpu
def test_trilu():
verify_trilu((3, 3), True, 0)
verify_trilu((3, 3), False, 0)
verify_trilu((6, 6), True, 1)
verify_trilu((6, 6), False, 2)
verify_trilu((6, 6), False, -2)
verify_trilu((8, 6, 6), False, -2)
if __name__ == "__main__":
test_strided_slice()
test_concatenate()
test_stack()
test_transpose()
test_expand_dims()
test_reshape()
test_where()
test_squeeze()
test_split()
test_flip()
test_expand_like()
test_take()
test_gather_nd()
test_arange()
test_layout_transform()
test_repeat()
test_tile()
test_shape()
test_sequence_mask()
test_ndarray_size()
test_where_fusion()
test_one_hot()
test_unravel_index()
test_sparse_to_dense()
test_matrix_set_diag()
test_adv_index()
test_trilu()
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
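# Parametrized inputs for the unique tests: index dtype, whether the output is kept
# sorted, whether counts are returned, and (array size, maximum value) pairs.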
in_dtype = tvm.testing.parameter("int32", "int64")
is_sorted = tvm.testing.parameter(True, False, ids=["sorted", "unsorted"])
with_counts = tvm.testing.parameter(True, False, ids=["with_counts", "no_counts"])
arr_size, maxval = tvm.testing.parameters((1, 100), (10, 10), (10000, 100))
@tvm.testing.parametrize_targets
def test_unique(dev, target, in_dtype, is_sorted, with_counts, arr_size, maxval):
def calc_numpy_unique(data, is_sorted=False):
uniq, index, inverse, counts = np.unique(
data, return_index=True, return_inverse=True, return_counts=True
)
num_uniq = np.array([len(uniq)]).astype("int32")
if not is_sorted:
order = np.argsort(index)
index = np.sort(index)
reverse_order = np.argsort(order)
uniq = uniq[order].astype(data.dtype)
inverse = np.array([reverse_order[i] for i in inverse]).astype("int32")
counts = counts[order].astype("int32")
return [
uniq.astype(data.dtype),
index.astype("int32"),
inverse.astype("int32"),
counts,
num_uniq,
]
data = np.random.randint(0, maxval, size=(arr_size)).astype(in_dtype)
np_unique, np_indices, np_inverse_indices, np_counts, np_num_unique = calc_numpy_unique(
data, is_sorted
)
num_unique = np_num_unique[0]
implementations = {
"generic": (
lambda x, return_counts: topi.unique(x, is_sorted, return_counts),
topi.generic.schedule_unique,
),
"gpu": (
lambda x, return_counts: topi.cuda.unique(x, is_sorted, return_counts),
topi.cuda.schedule_scan,
),
"nvptx": (
lambda x, return_counts: topi.cuda.unique(x, is_sorted, return_counts),
topi.cuda.schedule_scan,
),
}
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm_data = tvm.nd.array(data, device=dev)
tvm_unique = tvm.nd.array(np.zeros(data.shape).astype(data.dtype), device=dev)
tvm_indices = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
tvm_inverse_indices = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
tvm_num_unique = tvm.nd.array(np.zeros([1]).astype("int32"), device=dev)
with tvm.target.Target(target):
te_input = tvm.te.placeholder(shape=data.shape, dtype=str(data.dtype))
outs = fcompute(te_input, with_counts)
s = fschedule(outs)
func = tvm.build(s, [te_input, *outs])
if with_counts:
tvm_counts = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
func(
tvm_data,
tvm_unique,
tvm_indices,
tvm_inverse_indices,
tvm_num_unique,
tvm_counts,
)
else:
func(tvm_data, tvm_unique, tvm_indices, tvm_inverse_indices, tvm_num_unique)
num_unique = np_num_unique[0]
assert tvm_num_unique.numpy()[0] == num_unique
np.testing.assert_allclose(tvm_unique.numpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(tvm_indices.numpy()[:num_unique], np_indices, atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(
tvm_inverse_indices.numpy(), np_inverse_indices, atol=1e-5, rtol=1e-5
)
if with_counts:
np.testing.assert_allclose(tvm_counts.numpy()[:num_unique], np_counts, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
"""Test code for upsampling""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing |
import math
from tvm.topi.utils |
import nchw_pack_layout
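# verify_upsampling builds topi.nn.upsampling for NCHW, NHWC, and packed layouts
# (e.g. NCHW1n16c) and compares it with the resize2d_python reference using
# asymmetric coordinate transformation (align_corners=False).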
def verify_upsampling(
batch,
in_channel,
in_height,
in_width,
scale_h,
scale_w,
layout="NCHW",
method="nearest_neighbor",
in_batch_block=0,
in_channel_block=0,
):
if layout == "NCHW":
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif nchw_pack_layout(layout):
A = te.placeholder(
(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block), name="A"
)
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_batch_block,
in_channel_block,
)
a_np = np.random.uniform(
size=(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block)
).astype(dtype)
elif layout == "NHWC":
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
dtype = A.dtype
out_shape = (
batch,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_channel,
)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.nn.upsampling(A, scale_h, scale_w, layout=layout, method=method, align_corners=False)
b_np = tvm.topi.testing.resize2d_python(
a_np,
(scale_h, scale_w),
layout,
method[2:] if method[0:2] == "bi" else method,
"asymmetric",
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_int_div_upsampling():
"""Test whether upsampling op is tilable when scale_h and scale_w is integer.
Compute_at cannot work correctly in the original floating-point multiplication.
After using integer division,compute_at can work correctly and reduce the
capacity of cache buffer.
In this test case, scale_h and scale_w are set to integers, the size
of cache buffer should be equal to (h_i/scale_h * w_i/scale_w * c_i).
"""
dtype = "int8"
scale_h = 2
scale_w = 2
x = te.placeholder([1, 32, 64, 64], dtype, "x")
y = topi.nn.upsampling(x, scale_h, scale_w)
func = te.create_prim_func([x, y])
s = tvm.tir.Schedule(func)
block = s.get_block("resize")
cache = s.cache_read(block, 0, "local")
n, c, h, w = s.get_loops(block)
s_factor = 8
c_o, c_i = s.split(c, factors=[None, s_factor])
h_o, h_i = s.split(h, factors=[None, s_factor])
w_o, w_i = s.split(w, factors=[None, s_factor])
s.reorder(n, c_o, h_o, w_o, h_i, w_i, c_i)
s.compute_at(cache, w_o)
wanted_rt = s_factor**3 / (scale_h * scale_w)
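# With s_factor = 8 and integer scales of 2, the expected cache extent is
# 8 * 8 * 8 / (2 * 2) = 128 elements, i.e. (h_i / scale_h) * (w_i / scale_w) * c_i.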
def analyze_upsampling_allocate(stmt):
if isinstance(stmt, tvm.tir.stmt.Allocate):
tvm.testing.assert_allclose(stmt.extents[0].value, wanted_rt)
lowered_irmodule = tvm.lower(s.mod["main"])
tvm.tir.stmt_functor.post_order_visit(
lowered_irmodule.functions.items()[0][1].body, analyze_upsampling_allocate
)
@tvm.testing.uses_gpu
def test_upsampling():
verify_upsampling(8, 16, 32, 32, 2.0, 2.0)
verify_upsampling(2, 32, 64, 64, 3.0, 3.0)
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0)
verify_upsampling(8, 16, 32, 32, 2.0, 2.0, layout="NHWC")
verify_upsampling(2, 32, 64, 64, 3.0, 3.0, layout="NHWC")
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, layout="NHWC")
verify_upsampling(2, 2, 32, 32, 2.0, 2.0, method="bilinear")
verify_upsampling(2, 2, 32, 32, 3.0, 3.0, method="bilinear")
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, method="bilinear")
verify_upsampling(2, 2, 32, 32, in_batch_block=4, in_channel_block=8, scale_h=2.0, scale_w=2.0)
verify_upsampling(2, 2, 64, 64, in_batch_block=1, in_channel_block=16, scale_h=3.0, scale_w=3.0)
verify_upsampling(
1, 4, 22, 32, in_batch_block=1, in_channel_block=16, scale_h=1.954545497894287, scale_w=2.0
)
verify_upsampling(
2,
2,
32,
32,
in_batch_block=1,
in_channel_block=1,
scale_h=2.0,
scale_w=2.0,
method="bilinear",
)
verify_upsampling(
2,
2,
32,
32,
in_batch_block=1,
in_channel_block=1,
scale_h=3.0,
scale_w=3.0,
method="bilinear",
)
verify_upsampling(
2,
4,
22,
32,
in_batch_block=1,
in_channel_block=16,
scale_h=1.954545497894287,
scale_w=2.0,
layout="NCHW1n16c",
method="bilinear",
)
verify_upsampling(2, 2, 32, 32, 2.0, 2.0, layout="NHWC", method="bilinear")
verify_upsampling(2, 2, 32, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
verify_upsampling(1, 64, 22, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
def verify_upsampling3d(
batch,
in_channel,
in_depth,
in_height,
in_width,
scale_d,
scale_h,
scale_w,
layout="NCDHW",
method="nearest_neighbor",
):
if layout == "NCDHW":
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
dtype
)
elif layout == "NDHWC":
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
dtype = A.dtype
out_shape = (
batch,
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_channel,
)
a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
dtype
)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.nn.upsampling3d(
A,
scale_d,
scale_h,
scale_w,
layout=layout,
method=method,
coordinate_transformation_mode="asymmetric",
)
b_np = tvm.topi.testing.resize3d_python(
a_np,
(scale_d, scale_h, scale_w),
layout,
method[3:] if method[0:3] == "tri" else method,
"asymmetric",
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_upsampling3d():
verify_upsampling3d(8, 8, 16, 16, 16, 2.0, 2.0, 2.0)
verify_upsampling3d(2, 16, 32, 32, 32, 3.0, 3.0, 3.0)
verify_upsampling3d(1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5)
verify_upsampling3d(8, 8, 16, 16, 16, 2.0, 2.0, 2.0, layout="NDHWC")
verify_upsampling3d(2, 16, 32, 32, 32, 3.0, 3.0, 3.0, layout="NDHWC")
verify_upsampling3d(1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5, layout="NDHWC")
verify_upsampling3d(2, 2, 16, 16, 16, 2.0, 2.0, 2.0, method="trilinear")
verify_upsampling3d(2, 2, 32, 32, 32, 3.0, 3.0, 3.0, method="trilinear")
verify_upsampling3d(1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5, method="trilinear")
verify_upsampling3d(2, 2, 16, 16, 16, 2.0, 2.0, 2.0, layout="NDHWC", method="trilinear")
verify_upsampling3d(2, 2, 32, 32, 32, 3.0, 3.0, 3.0, layout="NDHWC", method="trilinear")
verify_upsampling3d(
1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5, layout="NDHWC", method="trilinear"
)
if __name__ == "__main__":
test_upsampling()
test_upsampling3d()
test_int_div_upsampling()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for util"""
from tvm import topi
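# get_shape converts a shape between layout strings, e.g. (1, 3, 224, 224) in NCHW
# becomes (1, 224, 224, 3) in NHWC; packed layouts such as NCHW16c reorder the
# split sub-dimensions as well.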
def verify_get_shape(src_shape, src_layout, dst_layout, expect_shape):
dst_shape = topi.utils.get_shape(src_shape, src_layout, dst_layout)
assert dst_shape == expect_shape, "Shape mismatch: expecting %s but got %s" % (
expect_shape,
dst_shape,
)
def test_get_shape():
verify_get_shape((1, 3, 224, 224), "NCHW", "NCHW", (1, 3, 224, 224))
verify_get_shape((1, 3, 224, 224), "NCHW", "NHWC", (1, 224, 224, 3))
verify_get_shape((3, 2, 32, 48, 16), "NCHW16c", "NC16cWH", (3, 2, 16, 48, 32))
verify_get_shape((2, 3, 32, 32, 16, 8), "OIHW16i8o", "HWO8oI16i", (32, 32, 2, 8, 3, 16))
if __name__ == "__main__":
test_get_shape()
|
"""Test code for vision package""" |
import math |
import sys |
import numpy as np |
import pytest |