import numpy as np
import scipy.stats
import tvm
import tvm.relay
import tvm.testing
import tvm.topi
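# Helpers below build the TOPI Threefry / uniform / multinomial kernels from TE placeholders,
# compile them, and run them on numpy inputs.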
def threefry_split(target, dev, gen):
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
left_placeholder, right_placeholder = tvm.topi.random.threefry_split(gen_placeholder)
s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
f = tvm.build(s, [gen_placeholder, left_placeholder, right_placeholder])
left = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
right = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
f(tvm.nd.array(gen), left, right)
return left.numpy(), right.numpy()
def threefry_generate(target, dev, gen, size):
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
left_placeholder, right_placeholder = tvm.topi.random.threefry_generate(gen_placeholder, size)
s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
f = tvm.build(s, [gen_placeholder, left_placeholder, right_placeholder])
out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
rands = tvm.nd.array(np.zeros(size, dtype="uint64"))
f(tvm.nd.array(gen), out_gen, rands)
return out_gen.numpy(), rands.numpy()
def uniform(target, dev, gen, low, high, size, dtype):
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
low_placeholder = tvm.te.placeholder(low.shape, name="low", dtype=dtype)
high_placeholder = tvm.te.placeholder(high.shape, name="high", dtype=dtype)
left_placeholder, right_placeholder = tvm.topi.random.uniform(
gen_placeholder, low_placeholder, high_placeholder, size, dtype
)
s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
f = tvm.build(
s,
[gen_placeholder, low_placeholder, high_placeholder, left_placeholder, right_placeholder],
target=target,
)
out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"), device=dev)
rands = tvm.nd.array(np.zeros(size, dtype=dtype), device=dev)
f(
tvm.nd.array(gen, device=dev),
tvm.nd.array(low, device=dev),
tvm.nd.array(high, device=dev),
out_gen,
rands,
)
return out_gen.numpy(), rands.numpy()
def multinomial(target, dev, gen, probs, num_samples):
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
probs_placeholder = tvm.te.placeholder(probs.shape, name="probs", dtype="float32")
new_gen_placeholder, indices_placeholder = tvm.topi.random.multinomial(
gen_placeholder, probs_placeholder, num_samples
)
s = tvm.topi.generic.schedule_extern([new_gen_placeholder, indices_placeholder])
f = tvm.build(
s,
[gen_placeholder, probs_placeholder, new_gen_placeholder, indices_placeholder],
target=target,
)
out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"), device=dev)
indices = tvm.nd.array(np.zeros((*probs.shape[:-1], num_samples), dtype="int32"), device=dev)
f(tvm.nd.array(gen), tvm.nd.array(probs), out_gen, indices)
return out_gen.numpy(), indices.numpy()
@tvm.testing.parametrize_targets("llvm")
def test_threefry_split(target, dev):
gen = tvm.relay.random.threefry_key(0).data.numpy()
a, b = threefry_split(target, dev, gen)
assert (a != b).any() and (
a != gen
).any(), "Splitting a gen should result in different output gens"
assert (a == np.array([0, 0, 0, 0, 0, 0, 0, 0, 1 << 62, 0], dtype="uint64")).all()
assert (b == np.array([0, 0, 0, 0, 1 << 63, 0, 0, 0, 1 << 62, 0], dtype="uint64")).all()
for i in range(129):
a, b = threefry_split(target, dev, b)
assert (a[0:4] == b[0:4]).all(), "State part of split should be the same"
assert (b[0:4] != np.zeros(4, dtype="uint64")).any()
a, a_rands = threefry_generate(target, dev, a, (100,))
b, b_rands = threefry_generate(target, dev, b, (100,))
assert (
a_rands != b_rands
).all(), "Numbers generated from different initial states should be different"
_, rands1 = threefry_generate(target, dev, a, (100,))
_, rands2 = threefry_generate(target, dev, a, (100,))
assert (
rands1 == rands2
).all(), "Numbers generated from the same initial state should be the same"
a1, b1 = threefry_split(target, dev, a)
a2, b2 = threefry_split(target, dev, a)
assert (a1 == a2).all() and (
b1 == b2
).all(), "Split called on the same input should return the same result"
@tvm.testing.parametrize_targets("llvm")
def test_threefry_generate(target, dev):
gen = tvm.relay.random.threefry_key(0).data.numpy()
a, rands = threefry_generate(target, dev, gen, (2048,))
assert (
rands.shape[0] == 2048 and len(rands.shape) == 1
), "Output shape should match requested shape"
assert (a != gen).any(), "Output generator should be different from input generator"
a, rands = threefry_generate(target, dev, gen, (7,))
assert (
rands.shape[0] == 7 and len(rands.shape) == 1
), "Output shape should match requested shape"
gen = np.array(
[0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 1 << 63, 0], dtype="uint64"
)
a, rands = threefry_generate(target, dev, gen, (2048,))
assert gen[4] != a[4], "Overflow of counter should trigger path change"
assert a[7] == 2048, "Overflow of counter should still update counter"
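# Same counter overflow, but with no path space left; the state words themselves must change.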
gen = np.array([0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 0, 0], dtype="uint64")
a, rands = threefry_generate(target, dev, gen, (2048,))
assert (
gen[0:4] != a[0:4]
).any(), "Overflowing counter with no space left in path should change state"
@tvm.testing.parametrize_targets("llvm")
def test_threefry_wrapping(target, dev):
assert tvm.topi.random.threefry_test_wrapping(
target, dev
), f"{target} does not suppport wrapping unsigned integer arithmetic"
@tvm.testing.parametrize_targets("llvm")
def test_uniform(target, dev):
gen = tvm.relay.random.threefry_key(0).data.numpy()
m = 1024
n = 1024
dtypes = ["float32", "float64"]
for dtype in dtypes:
low = np.array(5.0, dtype=dtype)
high = np.array(10.0, dtype=dtype)
new_gen, rands = uniform(target, dev, gen, low, high, (m, n), dtype)
assert (gen != new_gen).any()
assert abs(np.mean(rands) - 7.5) < 1e-1
assert np.min(rands) >= 5.0
assert np.max(rands) <= 10.0
@tvm.testing.parametrize_targets("llvm")
def test_multinomial(target, dev):
def _verify_multinomial(size, num_samples, test_statistics=False):
gen = tvm.relay.random.threefry_key(np.random.randint(0, 1e5)).data.numpy()
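# Unnormalized integer weights; negatives are clamped to zero in the reference frequency computation below.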
probs = np.random.randint(low=-50, high=1000, size=size).astype("float32")
new_gen, indices = multinomial(target, dev, gen, probs, num_samples)
assert (gen != new_gen).any()
assert np.min(indices) >= 0
assert np.max(indices) < probs.shape[-1]
if test_statistics:
probs = probs.astype("float64")
probs = np.reshape(probs, [-1, probs.shape[-1]])
probs = np.maximum(probs, 0)
probs = probs / np.expand_dims(np.sum(probs, axis=-1), axis=-1)
expected_frequency = probs * num_samples + np.finfo(float).eps
expected_frequency = (
np.expand_dims((num_samples / np.sum(expected_frequency, axis=-1)), axis=-1)
* expected_frequency
)
indices = np.reshape(indices, [-1, indices.shape[-1]])
index_list = [np.squeeze(x, 0) for x in np.split(indices, indices.shape[0], axis=0)]
observed_freqs = [np.bincount(samples, minlength=size[-1]) for samples in index_list]
observed_freqs = np.stack(observed_freqs, axis=0)
_, p_value = scipy.stats.chisquare(observed_freqs, expected_frequency, axis=-1)
assert np.all(p_value > 1e-6)
_verify_multinomial([3], 2)
_verify_multinomial([2, 10], 1)
_verify_multinomial([2, 3, 10], 4)
_verify_multinomial([3, 10], 10000, test_statistics=True)
if __name__ == "__main__":
test_threefry_split(tvm.target.Target("llvm"), tvm.device("cpu"))
test_threefry_generate(tvm.target.Target("llvm"), tvm.device("cpu"))
test_threefry_wrapping(tvm.target.Target("llvm"), tvm.device("cpu"))
test_uniform(tvm.target.Target("llvm"), tvm.device("cpu"))
test_multinomial(tvm.target.Target("llvm"), tvm.device("cpu"))
"""Test code for QNN operators.""" |
import numpy as np |
import tvm
from tvm |
import topi, relay, te
from tvm.contrib |
import graph_executor |
import tvm.topi.testing
def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
A = te.placeholder(data_shape, name="value", dtype="float32")
D = te.placeholder([], name="dtype", dtype="int32")
S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)
a_np = np.random.uniform(size=data_shape).astype("float32")
d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])
s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
q_np = np.zeros(shape=data_shape, dtype="float32")
def check_target(target, dev):
a = tvm.nd.array(a_np, dev)
d = tvm.nd.array(d_np, dev)
s = tvm.nd.array(s_np, dev)
z = tvm.nd.array(z_np, dev)
q = tvm.nd.array(q_np, dev)
per_channel = channels[0] != 1
a_var = relay.var("a", shape=data_shape, dtype="float32")
if per_channel:
s_var = relay.const(s_np)
z_var = relay.const(z_np)
else:
s_var = relay.const(s_np[0])
z_var = relay.const(z_np[0])
real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", a_np)
m.run()
real_q_out = m.get_output(0)
with tvm.target.Target(target):
sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)
func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize")
func(a, d, s, z, q)
mismatch = q.numpy() != real_q_out.numpy().astype("float32")
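# Simulated and real quantize may disagree on a few borderline values, so allow a small number of mismatches.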
assert np.sum(mismatch) <= 3
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_simulated_quantize():
verify_simulated_quantize([1], "int8", [1], -1)
verify_simulated_quantize([2, 5], "int8", [5], 1)
verify_simulated_quantize([1, 32, 32, 32], "int8", [32], -1)
verify_simulated_quantize([1, 32, 32, 32], "uint8", [32], -2)
verify_simulated_quantize([2, 5], "int32", [5], 1)
def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
A = te.placeholder(data_shape, name="value", dtype="float32")
D = te.placeholder([], name="dtype", dtype="int32")
S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)
a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)
a_np_f = a_np.astype("float32")
d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])
s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
dq_np = np.zeros(shape=data_shape, dtype="float32")
def check_target(target, dev):
a = tvm.nd.array(a_np_f, dev)
d = tvm.nd.array(d_np, dev)
s = tvm.nd.array(s_np, dev)
z = tvm.nd.array(z_np, dev)
dq = tvm.nd.array(dq_np, dev)
per_channel = channels[0] != 1
a_var = relay.var("a", shape=data_shape, dtype=in_dtype)
if per_channel:
s_var = relay.const(s_np)
z_var = relay.const(z_np)
else:
s_var = relay.const(s_np[0])
z_var = relay.const(z_np[0])
real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", a_np)
m.run()
real_dq_out = m.get_output(0)
with tvm.target.Target(target):
sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)
func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize")
func(a, d, s, z, dq)
tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_simulated_dequantize():
verify_simulated_dequantize([1], "int8", [1], -1)
verify_simulated_dequantize([2, 5], "int8", [5], 1)
verify_simulated_dequantize([2, 5], "int8", [2], 0)
verify_simulated_dequantize([1, 32, 32, 32], "int8", [32], -1)
verify_simulated_dequantize([1, 32, 32, 32], "uint8", [32], -2)
verify_simulated_dequantize([2, 5], "int32", [5], 1)
if __name__ == "__main__":
test_simulated_quantize()
test_simulated_dequantize()
"""Test code for reduce.""" |
import os |
import sys |
import numpy as np |
import pytest |
import tvm |
import tvm.testing |
import tvm.topi.testing
from tvm |
import te, topi
in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters(
((32,), 0, False, "argmax", "float32"),
((128, 24, 128, 24), (1, 2, 3), True, "sum", "float32"),
((2, 3), None, True, "all", "bool"),
((128, 24 * 128 * 24), (1,), False, "max", "float32"),
((32, 128, 24), None, True, "sum", "float32"),
((32, 128, 24), None, True, "all", "bool"),
((128, 24, 128, 24), (0, 2), False, "min", "float32"),
((32, 128), 1, True, "argmax", "float32"),
((32, 24, 32, 24), 2, False, "argmin", "float32"),
((31, 21, 15), None, True, "argmax", "float32"),
((31, 21, 15), None, False, "sum", "float32"),
((128, 24, 128, 24), (1, 2, 3), True, "sum", "float64"),
((2, 3), None, True, "any", "bool"),
((32, 128, 24), None, True, "any", "bool"),
((1, 4, 7), 1, True, "any", "bool"),
((128, 24, 128, 24), 2, False, "any", "bool"),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(in_shape, axis, keepdims, reduce_type, dtype):
if dtype == "bool":
in_npy_map = in_npy = np.random.choice([True, False], size=in_shape)
else:
in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype)
if reduce_type == "sum":
out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
elif reduce_type == "all" and dtype == "bool":
out_npy = in_npy_map.all(axis=axis, keepdims=keepdims)
elif reduce_type == "any" and dtype == "bool":
out_npy = in_npy_map.any(axis=axis, keepdims=keepdims)
elif reduce_type == "max":
out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
elif reduce_type == "min":
out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
elif reduce_type == "argmax":
out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
elif reduce_type == "argmin":
out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
return in_npy, in_npy_map, out_npy
def _my_npy_argmax(arr, axis, keepdims):
if not keepdims:
return arr.argmax(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmax(axis=axis).reshape(out_shape)
def _my_npy_argmin(arr, axis, keepdims):
if not keepdims:
return arr.argmin(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmin(axis=axis).reshape(out_shape)
def test_reduce_map(target, dev, ref_data, in_shape, axis, keepdims, reduce_type, dtype):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and reduce_type in ["sum", "any", "all"]:
pytest.xfail(f"Vulkan backend has known errors on {reduce_type}")
in_npy, in_npy_map, out_npy = ref_data
A = te.placeholder(shape=in_shape, name="A", dtype=dtype)
A1 = topi.sqrt(topi.exp(A))
out_dtype = dtype
if reduce_type == "sum":
B = topi.sum(A1, axis=axis, keepdims=keepdims)
elif reduce_type == "all":
B = topi.all(A, axis=axis, keepdims=keepdims)
elif reduce_type == "any":
B = topi.any(A, axis=axis, keepdims=keepdims)
elif reduce_type == "max":
B = topi.max(A1, axis=axis, keepdims=keepdims)
elif reduce_type == "min":
B = topi.min(A1, axis=axis, keepdims=keepdims)
elif reduce_type == "argmax":
B = topi.argmax(A1, axis=axis, keepdims=keepdims)
out_dtype = "int32"
elif reduce_type == "argmin":
B = topi.argmin(A1, axis=axis, keepdims=keepdims)
out_dtype = "int32"
else:
raise NotImplementedError
with tvm.target.Target(target):
s = tvm.topi.testing.get_reduce_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name=reduce_type)
data_tvm = tvm.nd.array(in_npy, device=dev)
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=out_dtype)
foo(data_tvm, out_tvm)
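# For argmax/argmin, gather the values at the returned indices and compare against the true max/min.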
if reduce_type == "argmax" or reduce_type == "argmin":
out_tvm_indices = out_tvm.numpy()
if keepdims:
out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis)
if axis is None:
out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
else:
other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis + 1) :]))
sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:]
out_tvm_val = in_npy_map[sel_indices]
if reduce_type == "argmax":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1e-3, 1e-3)
elif reduce_type == "argmin":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3)
else:
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3)
def test_complex_reduce(target, dev):
in_shape = (2, 3)
dtype = "float32"
axis = 0
keepdims = False
A = te.placeholder(shape=in_shape, name="A", dtype=dtype)
B = topi.sum(A, axis=axis, keepdims=keepdims)
C = topi.add(B, B)
D = topi.multiply(B, B)
E = topi.add(C, D)
with tvm.target.Target(target):
s = tvm.topi.testing.get_reduce_schedule(target)(E)
foo = tvm.build(s, [A, E], target, name="sum")
in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
sum_npy = in_npy.sum(axis=axis, keepdims=keepdims)
out_npy = sum_npy * 2 + sum_npy * sum_npy
data_tvm = tvm.nd.array(in_npy, device=dev)
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=dtype)
foo(data_tvm, out_tvm)
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3)
if __name__ == "__main__":
tvm.testing.main()
"""Test code for relu activation""" |
import sys |
import os |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.topi.testing
from tvm.topi.utils |
import get_const_tuple
from tvm.contrib.nvcc |
import have_fp16 |
import pytest |
import tvm.testing
m, n, dtype = tvm.testing.parameters(
(10, 128, "float32"),
(128, 64, "float16"),
)
def test_relu(target, dev, m, n, dtype):
A = te.placeholder((m, n), name="A", dtype=dtype)
B = topi.nn.relu(A)
a_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0)
if dtype == "float16" and target == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
pytest.skip("Skip because %s does not have fp16 support" % target)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_elemwise_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
foo = tvm.build(s, [A, B], target, name="relu")
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
size, alpha = tvm.testing.parameters((100, 0.1))
def test_leaky_relu(size, alpha):
A = te.placeholder((size,), name="A")
B = topi.nn.leaky_relu(A, alpha)
s = te.create_schedule([B.op])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
dev = tvm.cpu(0)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
foo = tvm.build(s, [A, B], "llvm", name="leaky_relu")
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
x, w, axis, weight_reshape = tvm.testing.parameters(
((1, 3, 2, 2), (3,), 1, (3, 1, 1)),
((1, 3, 2, 2), (2,), 2, (2, 1)),
((1, 3), (3,), 1, (3,)),
)
def test_prelu(x, w, axis, weight_reshape):
X = te.placeholder((x), name="X")
W = te.placeholder((w), name="W")
x_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(X.shape)).astype(X.dtype)
w_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(W.shape)).astype(W.dtype)
def _prelu_numpy(x, W):
return (x < 0) * (x * W.reshape(weight_reshape)) + (x >= 0) * x
B = topi.nn.prelu(X, W, axis)
s = te.create_schedule([B.op])
dev = tvm.cpu(0)
x_tvm = tvm.nd.array(x_np, dev)
w_tvm = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(X.shape), dtype=B.dtype), dev)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
foo = tvm.build(s, [X, W, B], "llvm", name="prelu")
foo(x_tvm, w_tvm, b)
out_np = _prelu_numpy(x_np, w_np)
tvm.testing.assert_allclose(b.numpy(), out_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
"""Example code to do reorg.""" |
import numpy as np
from tvm |
import topi
from tvm.topi.utils |
import get_const_tuple |
import tvm
from tvm |
import te |
import tvm.topi.testing |
import tvm.testing
_reorg_schedule = {
"generic": topi.generic.schedule_reorg,
"gpu": topi.cuda.schedule_reorg,
}
def verify_reorg(batch, in_size, in_channel, stride):
"""Verify reorg operator by comparing outputs from tvm and numpy implementation"""
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
B = topi.vision.reorg(A, stride)
a_shape = get_const_tuple(A.shape)
dtype = A.dtype
def get_ref_data_reorg():
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_np = tvm.topi.testing.reorg_python(a_np, stride)
return a_np, b_np
a_np, b_np = get_ref_data_reorg()
def check_device(device):
"""Cheching devices is enabled or not"""
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
s_func = tvm.topi.testing.dispatch(device, _reorg_schedule)
s = s_func([B])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, B], device)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
@tvm.testing.uses_gpu
def test_reorg():
verify_reorg(1, 20, 8, 2)
if __name__ == "__main__":
test_reorg()
from typing import Callable
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import topi
topi_funcs = {
"cumsum": {"generic": topi.cumsum, "cuda": topi.cuda.cumsum},
"cumprod": {"generic": topi.cumprod, "cuda": topi.cuda.cumprod},
}
identity_value = {"cumsum": 0, "cumprod": 1}
def get_implementations(name, axis, dtype, exclusive):
topi_func_generic = topi_funcs[name]["generic"]
topi_func_cuda = topi_funcs[name]["cuda"]
return {
"generic": (
lambda x: topi_func_generic(x, axis, dtype, exclusive=exclusive),
topi.generic.schedule_extern,
),
"cuda": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"nvptx": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"vulkan": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"metal": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
}
def _run_tests(
dev,
target,
op_name: str = "cumsum",
gt_func: Callable[..., np.array] = np.cumsum,
):
def check_scan(np_ref, data, axis=None, dtype=None, exclusive=False):
implementations = get_implementations(op_name, axis, dtype, exclusive)
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm.topi.testing.compare_numpy_tvm([data], np_ref, target, dev, fcompute, fschedule)
data = np.array([2, 3, 0])
check_scan(gt_func(data), data)
data = np.random.rand(10) > 0.5
data = data.astype(np.int32)
check_scan(gt_func(data, dtype=np.int32), data)
check_scan(gt_func(data), data, dtype="int64")
data = np.random.rand(10) > 0.5
check_scan(gt_func(data, dtype=np.int32), data, dtype="int32")
for in_dtype in ["float32", "float64"]:
if target == "metal" and in_dtype == "float64":
continue
data = np.random.randn(10, 10).astype(in_dtype)
check_scan(gt_func(data), data)
check_scan(gt_func(data, axis=0), data, axis=0)
check_scan(gt_func(data, axis=1), data, axis=1)
data = np.random.randn(10, 5, 10).astype(in_dtype)
check_scan(gt_func(data), data)
check_scan(gt_func(data, axis=0), data, axis=0)
check_scan(gt_func(data, axis=1), data, axis=1)
check_scan(gt_func(data, axis=-1), data, axis=-1)
for in_dtype in ["int32", "int64"]:
data = np.random.randint(-100, 100, size=(100, 100)).astype(in_dtype)
check_scan(gt_func(data, dtype=in_dtype), data)
check_scan(gt_func(data), data, dtype="int64")
check_scan(gt_func(data, axis=0, dtype=in_dtype), data, axis=0)
check_scan(gt_func(data, axis=1, dtype=in_dtype), data, axis=1)
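# Sums of values near 2**31 overflow int32, so the result is requested as int64.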
data = np.random.randint(1 << 30, (1 << 31) - 1, size=(100)).astype(in_dtype)
check_scan(gt_func(data), data, dtype="int64")
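# Exclusive scans: the expected result is the inclusive scan shifted by one, with the identity value at the start.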
data = np.random.randint(-100, 100, size=(100, 100)).astype("int64")
expected_result = np.roll(gt_func(data), 1)
expected_result[0] = identity_value[op_name]
check_scan(expected_result, data, dtype="int64", exclusive=True)
expected_result = np.roll(gt_func(data, axis=0, dtype=in_dtype), 1, axis=0)
expected_result[0, :] = identity_value[op_name]
check_scan(expected_result, data, axis=0, exclusive=True)
expected_result = np.roll(gt_func(data, axis=1, dtype=in_dtype), 1, axis=1)
expected_result[:, 0] = identity_value[op_name]
check_scan(expected_result, data, axis=1, exclusive=True)
@tvm.testing.parametrize_targets
def test_cumsum(dev, target):
_run_tests(dev, target, op_name="cumsum", gt_func=np.cumsum)
@tvm.testing.parametrize_targets
def test_cumprod(dev, target):
_run_tests(dev, target, op_name="cumprod", gt_func=np.cumprod)
if __name__ == "__main__":
test_cumsum(tvm.device("cpu"), tvm.target.Target("llvm"))
test_cumsum(tvm.device("cuda"), tvm.target.Target("cuda"))
test_cumsum(tvm.device("nvptx"), tvm.target.Target("nvptx"))
test_cumsum(tvm.device("vulkan"), tvm.target.Target("vulkan"))
test_cumsum(tvm.device("metal"), tvm.target.Target("metal"))
test_cumprod(tvm.device("cpu"), tvm.target.Target("llvm"))
test_cumprod(tvm.device("cuda"), tvm.target.Target("cuda"))
test_cumprod(tvm.device("nvptx"), tvm.target.Target("nvptx"))
test_cumprod(tvm.device("vulkan"), tvm.target.Target("vulkan"))
test_cumprod(tvm.device("metal"), tvm.target.Target("metal")) |
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
@tvm.testing.parametrize_targets
def test_scatter_nd(dev, target):
def check_scatter_nd(data, indices, updates, out, mode="add"):
implementations = {
"generic": (
lambda x, y, z: topi.scatter_nd(x, y, z, mode),
topi.generic.schedule_extern,
),
"gpu": (
lambda x, y, z: topi.cuda.scatter_nd(x, y, z, mode),
topi.generic.schedule_extern,
),
"cpu": (
lambda x, y, z: topi.x86.scatter_nd(x, y, z, mode),
topi.generic.schedule_extern,
),
}
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm.topi.testing.compare_numpy_tvm(
[data, indices, updates], out, target, dev, fcompute, fschedule
)
data = np.zeros((2, 2)).astype("int64")
indices = np.array([[1, 1, 0], [0, 1, 0]])
updates = np.array([2, 3, 0])
out = np.array([[0, 0], [2, 3]])
check_scatter_nd(data, indices, updates, out)
data = np.zeros((2, 2, 2, 2)).astype("int64")
indices = np.array([[0, 1], [1, 1]])
updates = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
out = np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]])
check_scatter_nd(data, indices, updates, out)
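# Repeated index rows accumulate into the same output row under the default "add" mode.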
indices = np.array([[1, 0, 0]])
updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype("float32")
shape = (2, 1560)
data = np.zeros(shape).astype("float32")
out = data.copy()
out[1, :] += updates[0, :]
out[0, :] += updates[1, :]
out[0, :] += updates[2, :]
check_scatter_nd(data, indices, updates, out)
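# Randomized check of both accumulate ("add") and overwrite ("update") modes against a numpy reference.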
for mode in ["add", "update"]:
updates = np.ones((5, 3)).astype("float64")
indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(
"int64"
)
shape = (2, 7, 3)
data = np.random.random(shape).astype("float64")
out = data.copy()
for i in range(indices.shape[1]):
for j in range(updates.shape[1]):
if mode == "add":
out[indices[0, i], indices[1, i], j] += updates[i, j]
elif mode == "update":
out[indices[0, i], indices[1, i], j] = updates[i, j]
check_scatter_nd(data, indices, updates, out, mode)
if __name__ == "__main__":
test_scatter_nd(tvm.device("cpu"), tvm.target.Target("llvm"))
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm.topi.testing import searchsorted_ref
from tvm import te, topi
topi_funcs = {"generic": topi.searchsorted, "cuda": topi.cuda.searchsorted}
def get_implementations():
topi_func_generic = topi_funcs["generic"]
topi_func_cuda = topi_funcs["cuda"]
return {
"generic": (
lambda x, y, side, out_dtype: topi_func_generic(x, y, side, out_dtype),
topi.generic.schedule_extern,
),
"cuda": (
lambda x, y, side, out_dtype: topi_func_cuda(x, y, side, out_dtype),
topi.cuda.schedule_extern,
),
"vulkan": (
lambda x, y, side, out_dtype: topi_func_cuda(x, y, side, out_dtype),
topi.cuda.schedule_extern,
),
}
@tvm.testing.parametrize_targets
def test_searchsorted(dev, target):
def verify_with_input(sorted_sequence_np, values_np, right):
sorted_sequence = te.placeholder(sorted_sequence_np.shape, dtype="float32")
values = te.placeholder(values_np.shape, dtype="float32")
out_dtype = "int32"
implementations = get_implementations()
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
with tvm.target.Target(target):
indices = fcompute(sorted_sequence, values, right, out_dtype)
s = fschedule([indices])
func = tvm.build(s, [sorted_sequence, values, indices], target=target)
dev = tvm.device(target, 0)
a = tvm.nd.array(sorted_sequence_np, dev)
b = tvm.nd.array(values_np, dev)
c = tvm.nd.array(np.zeros(values_np.shape, dtype=indices.dtype), dev)
func(a, b, c)
ref = searchsorted_ref(sorted_sequence_np, values_np, right, out_dtype)
np.testing.assert_equal(c.numpy(), ref)
def verify(sequence_len, num_search, outer_axes, right, sorted_sequence_1d=False):
if sorted_sequence_1d:
sorted_sequence_shape = (sequence_len,)
else:
sorted_sequence_shape = outer_axes + (sequence_len,)
values_shape = outer_axes + (num_search,)
verify_with_input(
np.sort(np.random.randn(*sorted_sequence_shape).astype("float32"), axis=-1),
np.random.randn(*values_shape).astype("float32"),
right,
)
verify(1024, 1000, (10, 5, 3), False)
verify(999, 2000, (10, 5, 3), True)
verify(1000, 1000, (), False)
verify(2001, 100, (500,), True)
verify(2001, 100, (500,), False, sorted_sequence_1d=True)
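# Values outside the sorted range should map to the boundary indices (0 or len(sorted_sequence)).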
for right in [True, False]:
sorted_sequence = np.array([1, 2, 3, 4, 5], dtype="float32")
verify_with_input(sorted_sequence, np.array([6], dtype="float32"), right)
verify_with_input(sorted_sequence, np.array([0], dtype="float32"), right)
"""Test code for softmax""" |
import logging |
import os |
import sys |
import numpy as np |
import pytest |
import tvm |
import tvm.testing |
import tvm.topi.testing
from tvm |
import te, topi
from tvm.topi.utils |
import get_const_tuple
_softmax_schedule = {
"generic": topi.generic.schedule_softmax,
"cpu": topi.x86.schedule_softmax,
"gpu": topi.cuda.schedule_softmax,
"hls": topi.hls.schedule_softmax,
}
dtype = tvm.testing.parameter("float32", "float64")
configs = {
"softmax": {
"topi": topi.nn.softmax,
"ref": tvm.topi.testing.softmax_python,
"dimensions": [1, 2, 4],
"axis": [0, 1, 2, 3],
},
"log_softmax": {
"topi": topi.nn.log_softmax,
"ref": tvm.topi.testing.log_softmax_python,
"dimensions": [2, 3],
"axis": [1],
},
}
shapes = [(32, 10), (3, 4), (1, 16, 256, 256), (32,)]
softmax_operation, shape, axis = tvm.testing.parameters(
*[
(name, shape, axis)
for name, config in configs.items()
for shape in shapes
if len(shape) in config["dimensions"]
for axis in range(len(shape))
if axis in config["axis"]
]
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(shape, dtype, softmax_operation, axis):
ref_func = configs[softmax_operation]["ref"]
a_np = np.random.uniform(size=shape).astype(dtype)
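# Move the softmax axis to the innermost position so the 2-D numpy reference can be applied, then undo the transpose.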
perm = list(range(a_np.ndim))
perm[-1], perm[axis] = perm[axis], perm[-1]
trans_shape = [a_np.shape[i] for i in perm]
a_np_2d = a_np.transpose(perm).reshape(-1, trans_shape[-1])
b_np_2d = ref_func(a_np_2d)
b_np = b_np_2d.reshape(*trans_shape).transpose(perm)
return a_np, b_np
def test_softmax(target, dev, shape, dtype, ref_data, softmax_operation, axis):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and dtype == "float64":
pytest.xfail("Vulkan GLSL.std.450 does not support 64-bit floats")
A = te.placeholder(shape, dtype=dtype, name="A")
topi_op = configs[softmax_operation]["topi"]
B = topi_op(A, axis=axis)
with tvm.target.Target(target):
fschedule = tvm.topi.testing.dispatch(target, _softmax_schedule)
s = fschedule(B)
a_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
"""Test code for vision package""" |
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
_sort_implement = {
"generic": (topi.sort, topi.generic.schedule_sort),
"gpu": (topi.cuda.sort, topi.cuda.schedule_sort),
}
_argsort_implement = {
"generic": (topi.argsort, topi.generic.schedule_argsort),
"gpu": (topi.cuda.argsort, topi.cuda.schedule_argsort),
}
_topk_implement = {
"generic": (topi.topk, topi.generic.schedule_topk),
"gpu": (topi.cuda.topk, topi.cuda.schedule_topk),
}
axis = tvm.testing.parameter(0, -1, 1)
is_ascend = tvm.testing.parameter(True, False, ids=["is_ascend", "not_ascend"])
dtype = tvm.testing.parameter("int64", "float32")
topk = tvm.testing.parameter(0, 1, 5)
topk_ret_type = tvm.testing.parameter("values", "indices", "both")
def test_sort(target, dev, axis, is_ascend):
np.random.seed(0)
dshape = (20, 100)
data_dtype = "float32"
data = te.placeholder(dshape, name="data", dtype=data_dtype)
perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
np.random.shuffle(perm)
np_data = perm.reshape(dshape)
if is_ascend:
np_sort = np.sort(np_data, axis=axis)
else:
np_sort = -np.sort(-np_data, axis=axis)
if axis == 0:
np_sort = np_sort[: dshape[axis], :]
else:
np_sort = np_sort[:, : dshape[axis]]
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _sort_implement)
out = fcompute(data, axis=axis, is_ascend=is_ascend)
s = fschedule(out)
tvm_data = tvm.nd.array(np_data, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
f = tvm.build(s, [data, out], target)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_sort, rtol=1e0)
def test_argsort(target, dev, axis, is_ascend):
dshape = (20, 100)
data_dtype = "float32"
data = te.placeholder(dshape, name="data", dtype=data_dtype)
perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
np.random.shuffle(perm)
np_data = perm.reshape(dshape)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
if axis == 0:
np_indices = np_indices[: dshape[axis], :]
else:
np_indices = np_indices[:, : dshape[axis]]
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _argsort_implement)
out = fcompute(data, axis=axis, is_ascend=is_ascend)
s = fschedule(out)
tvm_data = tvm.nd.array(np_data, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
f = tvm.build(s, [data, out], target)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_indices.astype(data_dtype), rtol=1e0)
def test_topk(target, dev, topk, axis, topk_ret_type, is_ascend, dtype):
np.random.seed(0)
shape = (20, 100)
data_dtype = "float32"
data = te.placeholder(shape, name="data", dtype=data_dtype)
np_data = np.random.uniform(size=shape).astype(data_dtype)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
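# topk == 0 means keep all elements along the axis.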
kk = topk if topk >= 1 else shape[axis]
if axis == 0:
np_indices = np_indices[:kk, :]
np_values = np.zeros(np_indices.shape).astype(data_dtype)
for i in range(shape[1]):
np_values[:, i] = np_data[np_indices[:, i], i]
else:
np_indices = np_indices[:, :kk]
np_values = np.zeros(np_indices.shape).astype(data_dtype)
for i in range(shape[0]):
np_values[i, :] = np_data[i, np_indices[i, :]]
np_indices = np_indices.astype(dtype)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _topk_implement)
outs = fcompute(data, topk, axis, topk_ret_type, is_ascend, dtype)
outs = outs if isinstance(outs, list) else [outs]
s = fschedule(outs)
tvm_data = tvm.nd.array(np_data, dev)
tvm_res = []
for t in outs:
tvm_res.append(tvm.nd.empty(t.shape, dtype=t.dtype, device=dev))
f = tvm.build(s, [data] + outs, target)
f(tvm_data, *tvm_res)
if topk_ret_type == "both":
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
tvm.testing.assert_allclose(tvm_res[1].numpy(), np_indices)
elif topk_ret_type == "values":
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
else:
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_indices)
if __name__ == "__main__":
tvm.testing.main()
"""Test code for space to batch""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing
def verify_space_to_batch_nd(input_shape, block_shape, pad_before, pad_after, pad_value=0):
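# Expected output shape: batch is multiplied by prod(block_shape); each padded spatial dim is divided by its block size.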
out_shape = []
out_shape.append(int((input_shape[0] * np.prod(block_shape))))
for i in range(1, len(block_shape) + 1):
pad = pad_before[i - 1] + pad_after[i - 1]
out_shape.append(int((input_shape[i] + pad) // block_shape[i - 1]))
for i in range(len(block_shape) + 1, len(input_shape)):
out_shape.append(input_shape[i])
A = te.placeholder(input_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=input_shape).astype(dtype)
B = topi.nn.space_to_batch_nd(A, block_shape, pad_before, pad_after, pad_value)
b_np = tvm.topi.testing.space_to_batch_nd_python(
a_np, block_shape, pad_before, pad_after, pad_value
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_space_to_batch():
verify_space_to_batch_nd([3, 3, 2, 1], [3], [0], [0])
verify_space_to_batch_nd([3, 3, 2, 1], [3], [1], [2])
verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2], [1, 0, 3], [2, 0, 0])
verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2, 2], [1, 4, 0, 0], [2, 0, 1, 0])
if __name__ == "__main__":
test_space_to_batch()
"""Test code for space to depth""" |
import numpy as np |
import tvm
from tvm |