import numpy as np
import scipy.special
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay
from tvm.contrib import graph_executor
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_fastmath(target, dev):
def test_apply(relay_op, name, f_numpy, low, high, step, dtype="float32"):
a_np = np.arange(low, high, step).astype(dtype).reshape((1, -1))
b_np = f_numpy(a_np)
x = relay.var("x", shape=a_np.shape, dtype="float32")
y = relay_op(x)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
graph, lib, params = relay.build(mod, target=target, params=None)
func_name = "tvmgen_default_fused_" + name
assert func_name in graph
m = graph_executor.create(graph, lib, dev)
m.set_input("x", tvm.nd.array(a_np, dev))
m.set_input(**params)
m.run()
tvm_output = m.get_output(0)
tvm.testing.assert_allclose(tvm_output.numpy(), b_np, rtol=1e-5, atol=1e-5)
test_apply(relay.exp, "fast_exp", np.exp, low=-88, high=88, step=0.01)
test_apply(relay.erf, "fast_erf", scipy.special.erf, low=-10, high=10, step=0.01)
test_apply(relay.tanh, "fast_tanh", np.tanh, low=-10, high=10, step=0.01)
test_apply(
relay.nn.fast_softmax,
"nn_fast_softmax",
tvm.topi.testing.softmax_python,
low=-10,
high=10,
step=0.01,
)
if __name__ == "__main__":
test_fastmath()
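
# Illustrative sketch (not part of the original test suite): a minimal check, under
# the assumption that the public relay.transform.FastMath pass behaves as the
# parametrized test above expects, that exp is rewritten to its fast_exp approximation.
def _fastmath_rewrite_sketch():
    x = relay.var("x", shape=(1, 8), dtype="float32")
    mod = tvm.IRModule.from_expr(relay.Function([x], relay.exp(x)))
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.FastMath()(mod)
    # After the rewrite the printed module should reference fast_exp.
    assert "fast_exp" in mod["main"].astext()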
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te, relay
from tvm.relay.testing import check_grad, run_infer_type
from tvm.relay.transform import gradient
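# Note: tvm.testing.parameter / tvm.testing.parameters create pytest fixtures, so
# each test below runs once per combination of target/dev/executor kind and the
# values declared here.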
executor_kind = tvm.testing.parameter("debug")
def sigmoid(x):
one = np.ones_like(x)
return one / (one + np.exp(-x))
def relu(x):
x_copy = np.copy(x)
np.maximum(x_copy, 0, x_copy)
return x_copy
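# The config table in TestUnaryOp pairs each relay op with its closed-form
# derivative written in numpy; test_op differentiates relay_op(x) * g with the
# gradient pass and compares the result against ref_func(x, g).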
class TestUnaryOp:
config = {
"log": (tvm.relay.log, lambda x, g: g * (1 / x)),
"exp": (tvm.relay.exp, lambda x, g: g * np.exp(x)),
"sigmoid": (tvm.relay.sigmoid, lambda x, g: g * sigmoid(x) * (1 - sigmoid(x))),
"tanh": (tvm.relay.tanh, lambda x, g: g * (1 - np.tanh(x) * np.tanh(x))),
"sqrt": (tvm.relay.sqrt, lambda x, g: g * 0.5 * np.power(x, -0.5)),
"abs": (tvm.relay.abs, lambda x, g: np.where(x < 0, -g, g)),
"relu": (relay.nn.relu, lambda x, g: np.where(x < 0, np.zeros_like(x), g)),
"erf": (tvm.relay.erf, lambda x, g: g * (2.0 / (np.pi ** (0.5)) * np.exp(-x * x))),
"cos": (tvm.relay.cos, lambda x, g: g * -1.0 * np.sin(x)),
"sin": (tvm.relay.sin, lambda x, g: g * np.cos(x)),
"tan": (tvm.relay.tan, lambda x, g: g * (1.0 / (np.cos(x) ** 2))),
"atan": (tvm.relay.atan, lambda x, g: g * (1 / (1 + np.power(x, 2.0)))),
"log2": (tvm.relay.log2, lambda x, g: g * (1 / (np.log(2) * x))),
"log10": (tvm.relay.log10, lambda x, g: g * (1 / (np.log(10) * x))),
"cosh": (tvm.relay.cosh, lambda x, g: g * (np.sinh(x))),
"sinh": (tvm.relay.sinh, lambda x, g: g * (np.cosh(x))),
"asin": (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x**2) ** (1.0 / 2.0))),
"acos": (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x**2.0) ** (1.0 / 2.0))),
"acosh": (tvm.relay.acosh, lambda x, g: g * (1.0 / (x**2 - 1.0) ** (1.0 / 2.0))),
"asinh": (tvm.relay.asinh, lambda x, g: g * (1.0 / (x**2 + 1.0) ** (1.0 / 2.0))),
"atanh": (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x**2 - 1.0))),
}
relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
dtype = tvm.testing.parameter("float32", "float64")
shape = tvm.testing.parameter((10, 4))
def test_op(self, target, dev, executor_kind, relay_op, ref_func, shape, dtype):
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
known_breaks = {
"float32": [
tvm.relay.erf,
tvm.relay.tan,
tvm.relay.atan,
tvm.relay.log10,
tvm.relay.cosh,
tvm.relay.sinh,
tvm.relay.asin,
tvm.relay.acos,
tvm.relay.acosh,
tvm.relay.asinh,
tvm.relay.atanh,
],
"float64": [
tvm.relay.log,
tvm.relay.exp,
tvm.relay.sigmoid,
tvm.relay.tanh,
tvm.relay.sqrt,
tvm.relay.erf,
tvm.relay.cos,
tvm.relay.sin,
tvm.relay.tan,
tvm.relay.atan,
tvm.relay.log2,
tvm.relay.log10,
tvm.relay.cosh,
tvm.relay.sinh,
tvm.relay.asin,
tvm.relay.acos,
tvm.relay.acosh,
tvm.relay.asinh,
tvm.relay.atanh,
],
}
if relay_op in known_breaks[dtype]:
pytest.xfail(f"{dtype} {relay_op.__name__} not yet supported on Vulkan runtime")
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
g = relay.var("g", tp)
y = relay_op(x) * g
fwd_func = relay.Function([x, g], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data_in = np.random.rand(*shape).astype(dtype)
grad_in = np.random.rand(*shape).astype(dtype)
ref_grad_out = ref_func(data_in, grad_in)
op_res, (op_grad, _) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data_in, grad_in)
np.testing.assert_allclose(op_grad.numpy(), ref_grad_out, rtol=0.01)
class TestBinaryOp:
config = {
"add": (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]),
"subtract": (relay.subtract, lambda x, y: [np.ones_like(x), -np.ones_like(y)]),
"multiply": (relay.multiply, lambda x, y: [y, x]),
"divide": (relay.divide, lambda x, y: [1 / y, -x / (y**2)]),
}
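# Worked example for the entries above: with z = x / y the analytic gradients are
# dz/dx = 1 / y and dz/dy = -x / y**2, which is exactly the "divide" row.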
relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
dtype = tvm.testing.parameter("float32", "float64")
shape = tvm.testing.parameter((5, 10, 5))
def test_binary_op(self, target, dev, executor_kind, relay_op, ref_func, shape, dtype):
t = relay.TensorType(shape, dtype=dtype)
x = relay.var("x", t)
y = relay.var("y", t)
z = relay_op(x, y)
x_data = np.random.rand(*shape).astype(t.dtype)
y_data = np.random.rand(*shape).astype(t.dtype)
ref_grad0, ref_grad1 = ref_func(x_data, y_data)
fwd_func = relay.Function([x, y], z)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
op_res, (op_grad0, op_grad1) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(x_data, y_data)
np.testing.assert_allclose(op_grad0.numpy(), ref_grad0, rtol=0.01)
np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01)
def test_softmax_grad(executor_kind, target, dev):
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
pytest.xfail("Known failure on vulkan")
data = relay.var("data", relay.TensorType((1, 16), "float64"))
fwd_func = relay.Function([data], relay.nn.softmax(data))
check_grad(fwd_func, scale=1, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_log_softmax_grad(executor_kind, target, dev):
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
pytest.xfail("Known failure on vulkan")
data = relay.var("data", relay.TensorType((2, 16), "float64"))
fwd_func = relay.Function([data], relay.nn.log_softmax(data))
check_grad(fwd_func, scale=1, target_devices=[(target, dev)], executor_kind=executor_kind)
class TestBiasAddGrad:
d_shape, b_shape, axis = tvm.testing.parameters(
((1, 16), (16,), 1),
((1, 8, 2, 2), (8,), 1),
((1, 2, 2, 8), (8,), 3),
((4, 8), (8,), 1),
)
def test_bias_add(self, executor_kind, target, dev, d_shape, b_shape, axis):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
bias = relay.var("bias", relay.TensorType(b_shape, "float32"))
fwd_func = relay.Function([data, bias], relay.nn.bias_add(data, bias, axis=axis))
check_grad(fwd_func, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_expand_dims_grad(executor_kind, target, dev):
data = relay.var("data", shape=(2, 3), dtype="float64")
fwd_func = relay.Function([data], relay.expand_dims(data, axis=1, num_newaxis=2))
check_grad(fwd_func, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_concatenate_grad(executor_kind, target, dev):
x = relay.var("x", shape=(2, 2, 5))
y = relay.var("y", shape=(2, 1, 5))
z = relay.var("z", shape=(2, 4, 5))
fwd_func = relay.Function([x, y, z], relay.concatenate([x, y, z], axis=1))
check_grad(fwd_func, target_devices=[(target, dev)], executor_kind=executor_kind)
if __name__ == "__main__":
tvm.testing.main()
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing import check_grad
index_dtype = tvm.testing.parameter("int32", "int64")
val_dtype = tvm.testing.parameter("float32", "float64")
executor_kind = tvm.testing.parameter("debug")
def test_cross_entropy_grad(executor_kind, target, dev, val_dtype):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and val_dtype == "float64":
pytest.xfail("Known failing test case for vulkan runtime")
x = relay.var("x", shape=(2, 5), dtype=val_dtype)
y = relay.var("y", shape=(2, 5), dtype=val_dtype)
check_grad(
relay.Function([x, y], relay.op.nn.cross_entropy(x, y)),
eps=0.01,
scale=0.1,
mean=1,
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_cross_entropy_with_logits_grad(executor_kind, target, dev, val_dtype):
x = relay.var("x", shape=(2, 5), dtype=val_dtype)
y = relay.var("y", shape=(2, 5), dtype=val_dtype)
check_grad(
relay.Function([x, y], relay.op.nn.cross_entropy_with_logits(x, y)),
eps=0.01,
scale=0.1,
mean=1,
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_checkpoint(executor_kind, target, dev):
inputs = [relay.var("x{}".format(i), shape=(1,)) for i in range(4)]
output = relay.multiply(relay.add(inputs[0], inputs[1]), relay.add(inputs[2], inputs[3]))
check_grad(
relay.Function(inputs, relay.annotation.checkpoint(output)), executor_kind=executor_kind
)
scope = relay.ScopeBuilder()
out_tuple = scope.let(
"out_tuple",
relay.Tuple([relay.add(inputs[0], inputs[1]), relay.multiply(inputs[2], inputs[3])]),
)
scope.ret(
relay.subtract(
relay.annotation.checkpoint(relay.TupleGetItem(out_tuple, 0)),
relay.TupleGetItem(out_tuple, 1),
)
)
out_single = scope.get()
check_grad(
relay.Function(inputs, out_single),
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
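# relay.annotation.checkpoint only marks the wrapped expression for recomputation
# in the backward pass; numerically the gradients should match the unannotated
# program, which is what check_grad verifies above.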
class TestBatchMatmulGrad:
a_shape, b_shape, transpose_a, transpose_b = tvm.testing.parameters(
((2, 3, 5), (2, 5, 4), False, False),
((2, 3, 5), (2, 4, 5), False, True),
((2, 5, 3), (2, 5, 4), True, False),
((2, 5, 3), (2, 4, 5), True, True),
)
def test_batch_matmul_grad(
self, executor_kind, target, dev, a_shape, b_shape, transpose_a, transpose_b
):
tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32"))
tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32"))
check_grad(
relay.Function(
[tensor_a, tensor_b],
relay.op.nn.batch_matmul(
tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b
),
),
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_reverse_reshape_grad(executor_kind, target, dev):
x = relay.var("x", shape=(3, 4, 5), dtype="float64")
check_grad(
relay.Function([x], relay.op.reverse_reshape(x, (-1, 0))),
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_one_hot_grad(executor_kind, target, dev, index_dtype, val_dtype):
indices_shape = (3, 4)
depth = 5
axis = -1
inputs = [
np.random.randint(depth, size=indices_shape, dtype=index_dtype),
np.array(np.random.randn() * 1e-5).astype(val_dtype),
np.array(np.random.randn() * 1e-5).astype(val_dtype),
]
test_inputs = inputs[1:]
indices = relay.var("indices", shape=indices_shape, dtype=index_dtype)
on_val = relay.var("on_val", shape=tuple(), dtype=val_dtype)
off_val = relay.var("off_val", shape=tuple(), dtype=val_dtype)
y = relay.one_hot(indices, on_val, off_val, depth, axis, val_dtype)
f = relay.Function([indices, on_val, off_val], y)
check_grad(
f,
inputs=inputs,
test_inputs=test_inputs,
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import pytest
from tvm import topi
import tvm.topi.testing
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type, run_opt_pass
from tvm.relay.transform import gradient
import tvm.testing
executor_kind = tvm.testing.parameter("debug")
def verify_max_pool2d_grad(executor_kind, x_shape, pool_size, strides, padding, ceil_mode):
x = relay.var("x", relay.TensorType(x_shape, "float32"))
y = tvm.relay.nn.max_pool2d(
x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode
)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype("float32")
ph, pw = padding
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=pool_size,
strides=strides,
padding=[ph, pw, ph, pw],
pool_type="max",
ceil_mode=ceil_mode,
)
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
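# With out_grad set to ones, the pool_grad_nchw reference routes each output
# gradient back to the arg-max element of its pooling window, which is the
# expected max_pool2d gradient.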
@tvm.testing.uses_gpu
def test_max_pool2d_grad(executor_kind):
verify_max_pool2d_grad(
executor_kind,
(1, 4, 16, 16),
pool_size=(2, 2),
strides=(2, 2),
padding=(0, 0),
ceil_mode=False,
)
verify_max_pool2d_grad(
executor_kind,
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
)
def verify_avg_pool2d_grad(
x_shape,
pool_size,
strides,
padding,
ceil_mode,
count_include_pad,
executor_kind,
dtype="float32",
):
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in x_shape], dtype=dtype)
y = tvm.relay.nn.avg_pool2d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype(dtype)
ph, pw = padding
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=pool_size,
strides=strides,
padding=[ph, pw, ph, pw],
pool_type="avg",
ceil_mode=ceil_mode,
)
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_avg_pool2d_grad(executor_kind):
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(2, 2),
strides=(2, 2),
padding=(0, 0),
ceil_mode=False,
count_include_pad=True,
executor_kind=executor_kind,
)
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
count_include_pad=False,
executor_kind=executor_kind,
)
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
count_include_pad=False,
executor_kind=executor_kind,
dtype="int32",
)
def verify_global_avg_pool2d_grad(executor_kind, x_shape):
x = relay.var("x", relay.TensorType(x_shape, "float32"))
y = tvm.relay.nn.global_avg_pool2d(x)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype("float32")
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=(x_shape[2], x_shape[3]),
strides=(1, 1),
padding=[0, 0, 0, 0],
pool_type="avg",
ceil_mode=False,
)
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_global_avg_pool2d_grad(executor_kind):
verify_global_avg_pool2d_grad(executor_kind, (1, 4, 16, 16))
verify_global_avg_pool2d_grad(executor_kind, (1, 8, 8, 24))
def verify_conv2d_grad(
dshape, wshape, strides, padding, dilation, groups=1, mode="higher_order", executor_kind="vm"
):
dtype = "float32"
data = relay.var("data", shape=dshape, dtype=dtype)
weight = relay.var("weight", shape=wshape, dtype=dtype)
conv = relay.nn.conv2d(
data,
weight,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
out_dtype=dtype,
)
fwd_func = relay.Function([data, weight], conv)
check_grad(fwd_func, mode=mode, executor_kind=executor_kind)
@tvm.testing.uses_gpu
def test_conv2d_grad(executor_kind):
verify_conv2d_grad(
(1, 4, 16, 16), (16, 4, 3, 3), [1, 1], [1, 1], [1, 1], executor_kind=executor_kind
)
verify_conv2d_grad(
(1, 4, 16, 16), (16, 4, 1, 1), [1, 1], [0, 0], [1, 1], executor_kind=executor_kind
)
verify_conv2d_grad(
(1, 4, 16, 16), (16, 4, 1, 1), [2, 2], [0, 0], [1, 1], executor_kind=executor_kind
)
verify_conv2d_grad(
(1, 4, 16, 16),
(16, 4, 3, 3),
[1, 1],
[1, 1],
[1, 1],
mode="first_order",
executor_kind=executor_kind,
)
def verify_dense_grad(d_shape, w_shape, executor_kind):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
weight = relay.var("weight", relay.TensorType(w_shape, "float32"))
fwd_func = relay.Function([data, weight], relay.nn.dense(data, weight))
check_grad(fwd_func, executor_kind=executor_kind)
def test_dense_grad(executor_kind):
verify_dense_grad((1, 8), (16, 8), executor_kind)
verify_dense_grad((1, 4), (3, 4), executor_kind)
verify_dense_grad((5, 4), (3, 4), executor_kind)
def verify_matmul_grad(a_shape, b_shape, transpose_a, transpose_b, executor_kind):
tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32"))
tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32"))
fwd_func = relay.Function(
[tensor_a, tensor_b],
relay.nn.matmul(tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b),
)
check_grad(fwd_func, executor_kind=executor_kind)
def test_matmul_grad(executor_kind):
verify_matmul_grad((1, 8), (8, 16), False, False, executor_kind)
verify_matmul_grad((4, 1), (4, 3), True, False, executor_kind)
verify_matmul_grad((4, 5), (3, 4), True, True, executor_kind)
def verify_batch_flatten_grad(d_shape, executor_kind):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], relay.nn.batch_flatten(data))
check_grad(fwd_func, executor_kind=executor_kind)
def test_batch_flatten_grad(executor_kind):
verify_batch_flatten_grad((1, 2, 3, 4), executor_kind)
verify_batch_flatten_grad((1, 8), executor_kind)
def verify_conv2d_backward_weight(
executor_kind, dy_shape, x_shape, kernel_size, stride, padding, groups=1, out_channels=None
):
dtype = "float32"
dy = relay.var("dy", shape=dy_shape, dtype=dtype)
x = relay.var("x", shape=x_shape, dtype=dtype)
dw_func = relay.Function(
[dy, x],
relay.nn.conv2d_backward_weight(
dy,
x,
strides=stride,
padding=padding,
kernel_size=kernel_size,
groups=groups,
channels=out_channels,
out_dtype=dtype,
),
)
dw_func_legalized = run_opt_pass(dw_func, relay.transform.Legalize())
for dw, target in [(dw_func_legalized, "llvm"), (dw_func, "cuda -libs=cudnn")]:
if "cudnn" in target and not tvm.contrib.cudnn.exists():
continue
dev = tvm.device(target, 0)
dy_np = np.random.randn(*dy_shape).astype(dtype)
x_np = np.random.randn(*x_shape).astype(dtype)
dw_np = (
relay.create_executor(executor_kind, device=dev, target=target)
.evaluate(dw)(dy_np, x_np)
.numpy()
)
ref_dw_np = tvm.topi.testing.conv2d_backward_weight_python(
dy_np, x_np, kernel_size, stride, padding, groups=groups, channels=out_channels
)
np.testing.assert_allclose(dw_np, ref_dw_np, rtol=1e-4, atol=1e-4)
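# conv2d_backward_weight computes the weight gradient dW of a conv2d from the
# output gradient dy and the input x; the numpy reference
# conv2d_backward_weight_python serves as ground truth, and the cuDNN path is
# exercised only when the library is available.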
def test_conv2d_backward_weight(executor_kind):
verify_conv2d_backward_weight(
executor_kind, (2, 8, 32, 32), (2, 4, 32, 32), (3, 3), (1, 1), (1, 1)
)
verify_conv2d_backward_weight(
executor_kind, (2, 16, 15, 15), (2, 3, 32, 32), (3, 3), (2, 2), (0, 0)
)
verify_conv2d_backward_weight(
executor_kind,
(1, 16, 32, 32),
(1, 16, 32, 32),
(3, 3),
(1, 1),
(1, 1),
groups=16,
out_channels=16,
)
def test_conv2d_backward_weight_infer_type():
depthwise_conv_code = """
fn (%input0: Tensor[(1, 3, 32, 32), float32], %v0_weight: Tensor[(3, 1, 3, 3), float32], %v0_bias: Tensor[(3), float32]) {
%0 = nn.conv2d(%input0, %v0_weight, padding=[1, 1, 1, 1], groups=3, channels=3, kernel_size=[3, 3]);
nn.bias_add(%0, %v0_bias)
}
"""
normal_conv_code = """
fn (%input0: Tensor[(1, 3, 32, 32), float32], %v0_weight: Tensor[(3, 3, 3, 3), float32], %v0_bias: Tensor[(3), float32]) {
%0 = nn.conv2d(%input0, %v0_weight, padding=[1, 1, 1, 1], groups=1, channels=3, kernel_size=[3, 3]);
nn.bias_add(%0, %v0_bias)
}
"""
SEMVER = '#[version = "0.0.5"]\n'  # Relay text-format version header (value assumed; the original line was truncated)
for code in [normal_conv_code, depthwise_conv_code]:
expr = tvm.parser.parse_expr(SEMVER + code)
fmod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(fmod)
bwd_expr = relay.transform.gradient(mod["main"], mode="first_order")
bwd_mod = tvm.IRModule.from_expr(bwd_expr)
bwd_mod = relay.transform.InferType()(bwd_mod)
if __name__ == "__main__":
pytest.main([__file__])
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type, run_opt_pass, _np_randn_from_type
from tvm.relay.transform import gradient
import tvm.testing
executor_kind = tvm.testing.parameter("debug")
@tvm.testing.uses_gpu
def test_clip(executor_kind):
for dtype in ("float32", "float64"):
ref = lambda x: np.where(
x > 10.0, np.zeros_like(x), np.where(x < 1.0, np.zeros_like(x), np.ones_like(x))
)
x = relay.var("x", relay.TensorType((10, 4), dtype))
y = tvm.relay.clip(x, 1.0, 10.0)
data = np.random.rand(10, 4).astype(dtype) * 11.0
ref_grad = ref(data)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
def verify_transpose_grad(d_shape, axes=None, executor_kind="vm"):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], relay.transpose(data, axes=axes))
check_grad(fwd_func, executor_kind=executor_kind)
def test_transpose_grad(executor_kind):
verify_transpose_grad((1, 2, 3, 4), executor_kind=executor_kind)
verify_transpose_grad((1, 2, 3, 4), axes=(0, 2, 3, 1), executor_kind=executor_kind)
def test_negative_grad(executor_kind):
data = relay.var("data", relay.TensorType((10, 4), "float32"))
fwd_func = relay.Function([data], relay.negative(data))
check_grad(fwd_func, executor_kind=executor_kind)
def test_cast_grad(executor_kind):
data = relay.var("data", relay.TensorType((10, 4), "float32"))
fwd_func = relay.Function([data], relay.cast(data, "float64"))
check_grad(fwd_func, executor_kind=executor_kind)
def test_cast_like_grad(executor_kind):
data = relay.var("data", shape=(10, 4), dtype="float32")
like = relay.var("like", shape=(1,), dtype="float64")
fwd_func = relay.Function([data, like], relay.cast_like(data, like))
check_grad(fwd_func, executor_kind=executor_kind)
def test_copy_grad(executor_kind):
data = relay.var("data", relay.TensorType((10, 4), "float64"))
fwd_func = relay.Function([data], relay.copy(data))
check_grad(fwd_func, executor_kind=executor_kind)
def test_take_grad(executor_kind):
data_dtype = relay.TensorType((3, 4, 5), "float64")
data = relay.var("data", data_dtype)
indices = relay.var("indices", relay.TensorType((relay.Any(),), "int32"))
inputs = [_np_randn_from_type(data_dtype, scale=1e-5), np.array([1, 2], dtype="int32")]
test_inputs = [inputs[0]]
fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=1))
check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs, executor_kind=executor_kind)
fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=None))
check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs, executor_kind=executor_kind)
def test_stack_grad(executor_kind):
args = [relay.var(c, shape=(2, 3, 4), dtype="float64") for c in "xyz"]
fwd_func = relay.Function(args, relay.stack(args, axis=0))
check_grad(fwd_func, executor_kind=executor_kind)
def test_squeeze_grad(executor_kind):
data = relay.var("data", shape=(2, 1, 1, 3, 4, 1), dtype="float64")
fwd_func = relay.Function([data], relay.squeeze(data))
fwd_func_subset = relay.Function([data], relay.squeeze(data, axis=[1, -1]))
check_grad(fwd_func, executor_kind=executor_kind)
check_grad(fwd_func_subset, executor_kind=executor_kind)
def test_arange_grad(executor_kind):
dtype = "float64"
start = relay.var("start", relay.TensorType((), dtype))
stop = relay.var("stop", relay.TensorType((), dtype))
step = relay.var("step", relay.TensorType((), dtype))
values = [np.array(v, dtype=dtype) for v in [2.5, 9.5, 1.8]]
fwd_func = relay.Function([start, stop, step], relay.arange(start, stop, step, dtype))
check_grad(fwd_func, inputs=values, executor_kind=executor_kind)
def test_gather_nd_grad(executor_kind):
data = relay.var("data", relay.TensorType((2, 3), "float64"))
indices = relay.var("indices", relay.TensorType((2, 4), "int64"))
fwd = relay.Function([data, indices], relay.gather_nd(data, indices))
data_np = np.random.rand(2, 3).astype("float64")
indices_np = np.array([[0, 1, 1, 0], [0, 1, 0, 0]], dtype="int64")
check_grad(
fwd, inputs=[data_np, indices_np], test_inputs=[data_np], executor_kind=executor_kind
)
def test_reshape_like_grad(executor_kind):
data = relay.var("data", shape=(2, 3, 4), dtype="float32")
shape_like = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
fwd_func = relay.Function([data, shape_like], relay.reshape_like(data, shape_like))
check_grad(fwd_func, executor_kind=executor_kind)
def test_zeros_ones_grad_const_ints():
static_ty = relay.TensorType([2, 3, 4], dtype="float32")
expected_ty = relay.TupleType([static_ty, relay.TupleType([])])
for op in [relay.zeros, relay.ones]:
fwd_func = relay.Function([], op(static_ty.concrete_shape, static_ty.dtype))
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty)
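# gradient() of a zero-argument function returns the forward value together with
# an empty tuple of input gradients, hence the expected return type
# TupleType([TensorType, TupleType([])]) checked above.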
def test_zeros_ones_grad_const_expr():
shape_const = relay.const(np.array([2, 3, 4]), dtype="int32") * relay.const(1, dtype="int32")
static_ty = relay.TensorType([2, 3, 4], dtype="float32")
dyn_ty = relay.TensorType([relay.Any(), relay.Any(), relay.Any()], dtype="float32")
expected_ty_static = relay.TupleType([static_ty, relay.TupleType([])])
expected_ty_dyn = relay.TupleType([dyn_ty, relay.TupleType([])])
for op in [relay.zeros, relay.ones]:
fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
fwd_func = run_opt_pass(fwd_func, relay.transform.DynamicToStatic())
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_static)
fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_dyn)
def test_zeros_ones_grad_dynamic(executor_kind):
rank = np.random.randint(low=1, high=5, dtype="int32")
dyn_shape = np.random.randint(low=1, high=4, size=(rank,), dtype="int32")
shape_data = relay.var("shape_data", shape=(rank,), dtype="int32")
for op, op_ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
fwd_func = relay.Function([shape_data], op(shape_data, dtype="float32"))
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
for target, dev in tvm.testing.enabled_targets():
res, (grad,) = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
bwd_func
)(dyn_shape)
tvm.testing.assert_allclose(res.numpy(), op_ref(dyn_shape, dtype="float32"))
tvm.testing.assert_allclose(grad.numpy(), np.zeros((rank,), dtype="int32"))
if __name__ == "__main__":
pytest.main()
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm.relay.testing import check_grad, _np_randn_from_type
executor_kind = tvm.testing.parameter("debug")
def verify_reduction_grad(executor_kind, red_fn, d_shape, axis=None, keepdims=False, exclude=False):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], red_fn(data, axis=axis, keepdims=keepdims, exclude=exclude))
check_grad(fwd_func, executor_kind=executor_kind)
def test_reduction_grad(executor_kind):
def _unbiased_variance(x, axis=None, keepdims=False, exclude=False):
return relay.variance(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True)
for op in (relay.sum, relay.variance, _unbiased_variance, relay.mean):
verify_reduction_grad(executor_kind, op, (4, 2))
verify_reduction_grad(executor_kind, op, (4, 2), axis=-1, keepdims=True)
verify_reduction_grad(executor_kind, op, (4, 2, 1), axis=(1, 2), exclude=True)
verify_reduction_grad(executor_kind, op, (4, 2, 1), axis=1)
def verify_max_grad(executor_kind, d_shape, axis=None, keepdims=False, exclude=False):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function(
[data], relay.max(data, axis=axis, keepdims=keepdims, exclude=exclude)
)
check_grad(fwd_func, scale=1e-3, executor_kind=executor_kind)
def test_max_grad(executor_kind):
verify_max_grad(executor_kind, (10, 10), axis=None)
verify_max_grad(executor_kind, (10, 10), axis=-1)
verify_max_grad(executor_kind, (6, 3, 2), axis=(1, 2), keepdims=True)
verify_max_grad(executor_kind, (5, 4, 3), axis=(0, 2), exclude=True)
def test_where_grad(executor_kind):
cond_type = relay.TensorType((2, 3, 4), "int32")
lhs_type = relay.TensorType((1, 3, 4), "float32")
rhs_type = relay.TensorType((2, 1, 4), "float32")
inputs = [
np.random.randint(2, size=cond_type.concrete_shape, dtype=cond_type.dtype),
_np_randn_from_type(lhs_type, scale=1e-5),
_np_randn_from_type(rhs_type, scale=1e-5),
]
cond = relay.var("cond", type_annotation=cond_type)
lhs = relay.var("lhs", type_annotation=lhs_type)
rhs = relay.var("rhs", type_annotation=rhs_type)
fwd_func = relay.Function([cond, lhs, rhs], relay.where(cond, lhs, rhs))
check_grad(fwd_func, inputs=inputs, test_inputs=inputs[1:], executor_kind=executor_kind)
def test_less_equal_grad(executor_kind):
x_type = relay.TensorType((2, 3, 4), "float32")
y_type = relay.TensorType((3, 1), "float32")
inputs = [
np.random.choice([-1, 1], size=x_type.concrete_shape).astype(x_type.dtype),
np.random.choice([-2, 2], size=y_type.concrete_shape).astype(y_type.dtype),
]
x = relay.var("x", type_annotation=x_type)
y = relay.var("y", type_annotation=y_type)
fwd_func = relay.Function([x, y], relay.less_equal(x, y))
check_grad(fwd_func, inputs=inputs, test_inputs=inputs, eps=1e-6, executor_kind=executor_kind)
def test_not_equal_grad(executor_kind):
x_type = relay.TensorType((2, 3, 4), "float32")
y_type = relay.TensorType((3, 1), "float32")
inputs = [
np.random.choice([-1, 1], size=x_type.concrete_shape).astype(x_type.dtype),
np.random.choice([-2, 2], size=y_type.concrete_shape).astype(y_type.dtype),
]
x = relay.var("x", type_annotation=x_type)
y = relay.var("y", type_annotation=y_type)
fwd_func = relay.Function([x, y], relay.not_equal(x, y))
check_grad(fwd_func, inputs=inputs, test_inputs=inputs, eps=1e-6, executor_kind=executor_kind)
def test_strided_slice_grad(executor_kind):
def check(sh, dtype, begin, end, strides, slice_mode):
x = relay.var("x", shape=sh, dtype=dtype)
f = relay.Function(
[x],
relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode),
)
check_grad(f, executor_kind=executor_kind)
check((2, 3, 4), "float32", (0, 1, 0), (-1, -1, 1), (1, 1, 1), "size")
check((2, 3, 4), "float32", (0, 1, 0), (2, 3, 1), (1, 1, 1), "end")
check((2, 3, 4), "float32", (0, 0, 0), (-1, -1, -1), (1, 1, 2), "size")
check((2, 3, 4), "float32", (0, 0, 0), (2, 3, 4), (1, 1, 2), "end")
if __name__ == "__main__":
pytest.main()
import numpy as np
import pytest
import tvm
from tvm import te
import scipy
import scipy.special  # explicit import so scipy.special.erf below is always available
from tvm import relay
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
executor_kind = tvm.testing.parameter("graph", "vm")
def sigmoid(x):
one = np.ones_like(x)
return one / (one + np.exp(-x))
def relu(x):
x_copy = np.copy(x)
np.maximum(x_copy, 0, x_copy)
return x_copy
def rsqrt(x):
one = np.ones_like(x)
return one / np.sqrt(x)
class TestUnaryOp:
op_list = {
"log": (tvm.relay.log, np.log, True),
"exp": (tvm.relay.exp, np.exp, True),
"erf": (tvm.relay.erf, scipy.special.erf, True),
"sqrt": (tvm.relay.sqrt, np.sqrt, True),
"rqsrt": (tvm.relay.rsqrt, rsqrt, True),
"sigmoid": (tvm.relay.sigmoid, sigmoid, True),
"tanh": (tvm.relay.tanh, np.tanh, False),
"relu": (relay.nn.relu, relu, True),
"cos": (tvm.relay.cos, np.cos, True),
"sin": (tvm.relay.sin, np.sin, True),
"tan": (tvm.relay.tan, np.tan, False),
"atan": (tvm.relay.atan, np.arctan, False),
"ceil": (tvm.relay.ceil, np.ceil, True),
"floor": (tvm.relay.floor, np.floor, True),
"trunc": (tvm.relay.trunc, np.trunc, True),
"round": (tvm.relay.round, np.round, False),
}
dtype = tvm.testing.parameter("float16", "float32")
relay_op, ref_func, supports_fp16 = tvm.testing.parameters(
*op_list.values(), ids=op_list.keys()
)
def test_unary_op(self, target, dev, relay_op, ref_func, supports_fp16, dtype):
target = tvm.target.Target(target)
if dtype == "float16":
if target.kind.name == "cuda":
if not have_fp16(tvm.cuda(0).compute_version):
pytest.xfail(
"No float16 support on local cuda device (compute_version != 5.3 and < 6.0)"
)
elif target.kind.name == "vulkan" and not target.attrs.get("supports_float16", False):
pytest.xfail("No float16 support on vulkan target (supports_float16=False)")
elif not supports_fp16:
pytest.xfail(f"No float16 support on {target.kind.na |
me} target")
if target.kind.name == "vulkan" and relay_op in [
tvm.relay.erf,
tvm.relay.tan,
tvm.relay.atan,
]:
pytest.xfail(f"Vulkan runtime doesn't yet support {relay_op}")
shape = (10, 4)
dtype = dtype
tp = relay.TensorType(shape, dtype=dtype)
x = relay.var("x", type_annotation=tp)
y = relay_op(x)
assert ("{}(%x)".format(y.op.name)) in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == tp
if ref_func is not None:
data = np.random.rand(*shape).astype(dtype)
ref_res = ref_func(data).astype(dtype)
func = relay.Function([x], y)
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tolerance = 1e-2 if dtype == "float16" else 1e-5
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=tolerance)
@tvm.testing.uses_gpu
def test_binary_op():
def inst(vars, sh):
return [vars.get(s, s) for s in sh]
def check_binary_op(opfunc, ref, dtype):
n = te.var("n")
s1 = (5, n, 5)
s2 = (n, 1)
t1 = relay.TensorType(s1)
t2 = relay.TensorType(s2)
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == t1
if ref is not None:
t1 = relay.TensorType((5, 10, 5))
t2 = relay.TensorType((5, 10, 5))
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
x_data = np.random.rand(5, 10, 5).astype(dtype)
y_data = np.random.rand(5, 10, 5).astype(dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01, atol=1e-3)
for opfunc, ref in [
(relay.add, np.add),
(relay.subtract, np.subtract),
(relay.multiply, np.multiply),
(relay.divide, np.divide),
(relay.floor_divide, np.floor_divide),
(relay.floor_mod, np.fmod),
]:
for dtype in ["float16", "float32"]:
check_binary_op(opfunc, ref, dtype)
@tvm.testing.uses_gpu
def test_expand_dims():
def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
x = relay.Var("x", relay.TensorType(dshape, dtype))
func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = data.reshape(oshape)
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
for dtype in ["float16", "float32"]:
verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_bias_add():
for dtype in ["float16", "float32"]:
xshape = (10, 2, 3, 4)
bshape = (2,)
rtol = 1e-2 if dtype == "float16" else 1e-5
x = relay.var("x", sh |
ape=xshape, dtype=dtype)
bias = relay.var("bias", dtype=dtype)
z = relay.nn.bias_add(x, bias)
zz = run_infer_type(z)
assert "axis=" not in zz.astext()
assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)
func = relay.Function([x, bias], z)
x_data = np.random.uniform(size=xshape).astype(dtype)
y_data = np.random.uniform(size=bshape).astype(dtype)
ref_res = x_data + y_data.reshape((2, 1, 1))
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol)
def test_bias_add_type_failure():
def assert_failure(expr):
try:
run_infer_type(expr)
except tvm._ffi.base.TVMError:
return
else:
assert False
for axis in (0, -1, -3, 1):
assert_failure(relay.nn.bias_add(relay.const(1), relay.const(2), axis=axis))
def test_expand_dims_infer_type():
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", shape=(n, t, d), dtype=dtype)
y = relay.expand_dims(x, axis=2)
assert "axis=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)
@tvm.testing.uses_gpu
def test_softmax():
for shape in [(10, 4), (10, 5, 4)]:
for dtype in ["float16", "float32"]:
if dtype == "float16":
continue
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.softmax(x, axis=1)
assert "nn.softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = tvm.topi.testing.softmax_python(x_data, axis=1)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_log_softmax():
for shape in [(10, 4), (10, 5, 4)]:
for dtype in ["float16", "float32"]:
if dtype == "float16":
continue
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.log_softmax(x, axis=1)
assert "nn.log_softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = tvm.topi.testing.log_softmax_python(x_data, axis=1)
for target, dev in tvm.testing.enabled_targets():
if target == "nvptx":
continue
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_concatenate(executor_kind):
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", shape=(n, t, d))
y = relay.var("y", shape=(n, t, d))
z = relay.concatenate((x, y), axis=-1)
assert "axis=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
x = relay.exp(x)
z = relay.concatenate((x, y), axis=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
z = relay.concatenate((x, y), axis=1)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t + t, 100))
try:
x = relay.var("p1", shape=(2, 5))
y = relay.var("p2", shape=(2, 3))
c = relay.concatenate([x, y], axis=0)
func = relay.Function([x, y], c)
zz = run_infer_type(func)
except tvm._ffi.base.TVMError:
pass
else:
assert False
x = relay.var("x", shape=(10, 5), dtype=dtype)
y = relay.var("y", shape=(10, 5), dtype=dtype)
t = relay.var("z", shape=(), dtype=dtype)
z = relay.concatenate((x, y), axis=1)
z = relay.add(z, t)
func = relay.Function([x, y, t], z)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data, t_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_dropout(executor_kind):
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), te.size_var("d")
input_ty = relay.TensorType((n, t, d), dtype)
x = relay.var("x", input_ty)
y = relay.nn.dropout(x, rate=0.75)
assert "rate=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == input_ty
in_np = np.random.random([4, 5, 6]).astype("float32")
x = relay.const(in_np)
y = relay.nn.dropout(x, rate=0.5)
func = relay.Function([], y)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)()
tvm.testing.assert_allclose(op_res.numpy(), in_np, rtol=0.01)
def test_batch_norm():
for dtype in ["float16", "float32"]:
data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
beta = relay.var("beta", relay.TensorType((2,), dtype))
gamma = relay.var("gamma", relay.TensorType((2,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert "center=" in yy.astext()
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.TensorType((3, 2, 1), dtype),
relay.TensorType((2,), dtype),
relay.TensorType((2,), dtype),
]
)
)
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=0, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((3, 2, 1), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype),
]
)
)
data = relay.var("data", relay.Tenso |
rType((1, 2, 3), dtype))
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=-1, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((1, 2, 3), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype),
]
)
)
def do_concat_test(shapes, t_shape, dtype, axis, dev, target):
varsToConcat = []
inputData = []
pos = 0
for s in shapes:
varsToConcat.append(relay.var("x{}".format(pos), shape=s))
inputData.append(np.random.rand(*s).astype(dtype))
pos += 1
t = relay.var("z", shape=t_shape, dtype=dtype)
z = relay.concatenate(varsToConcat, axis=axis)
z = relay.add(z, t)
params = varsToConcat
params.append(t)
func = relay.Function(params, z)
t_data = np.random.uniform(low=-10, high=10, size=t_shape).astype(dtype)
ref_res = np.concatenate((tuple(inputData)), axis=axis) + t_data
mod = tvm.IRModule.from_expr(func)
executor = relay.create_executor("graph", mod=mod, device=dev, target=target)
op_res1 = executor.evaluate()(*inputData, t_data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=0.000001)
op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(
*inputData, t_data
)
tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=0.000001)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate1(target, dev):
np.random.seed(471)
maxNumDimensions = 6
shape = [4, 32, 16, 1, 31, 20, 21, 8, 28, 7]
for dtype in ["float32"]:
for dimsNum in range(1, maxNumDimensions):
np.random.shuffle(shape)
for axis in range(0, dimsNum):
numToConcat = np.random.uniform(low=2, high=10, size=(1)).astype("int64")[0]
shapes = []
normalizedAxis = axis
if axis < 0:
normalizedAxis += dimsNum
finalSize = 0
for i in range(0, numToConcat):
shp = tuple(shape[:dimsNum])
finalSize += shape[(i % len(shape))]
shapes.append(
shp[:normalizedAxis]
+ tuple([shape[(i % len(shape))]])
+ shp[normalizedAxis + 1 :]
)
t_shape = shp[:normalizedAxis] + tuple([finalSize]) + shp[normalizedAxis + 1 :]
do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate2(target, dev):
np.random.seed(13)
maxNumDimensions = 6
shape = [8, 3, 25, 33, 12, 29, 5, 11, 29, 11]
ind = 0
for dtype in ["float32"]:
for dimsNum in range(2, maxNumDimensions):
np.random.shuffle(shape)
for axis in range(-dimsNum + 1, dimsNum):
numToConcat = np.random.uniform(low=2, high=10, size=(1)).astype("int64")[0]
shapes = []
normalizedAxis = axis
if axis < 0:
normalizedAxis += dimsNum
finalSize = 0
for i in range(0, numToConcat):
axisVal = [1] * dimsNum
axisVal[axis] = shape[(ind % len(shape))]
ind += 1
finalSize += axisVal[axis]
shapes.append(tuple(axisVal))
temp = [1] * dimsNum
temp[axis] = finalSize
t_shape = tuple(temp)
do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate3(target, dev):
np.random.seed(477)
for dtype in ["float32"]:
axis = -2
ending = 1
shapes = [[3, 2, 1, ending], [3, 2, 1, ending]]
t_shape = [3, 2, 2, ending]
do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate4(target, dev):
np.random.seed(7)
x_shape = (2, 1)
x = relay.var("x", shape=x_shape, dtype="int64")
concat = relay.concatenate([x], axis=1)
f = relay.Function([x], concat)
x_val = np.array([[33], [13]], dtype="int64")
graph = relay.create_executor("graph", device=tvm.cpu(), target="llvm")
op_res = graph.evaluate(f)(x_val)
ref_res = np.concatenate([x_val], axis=1)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.000001)
def test_batch_norm_fold_const():
axis = 1
dtype = "float32"
shape = [4, 5, 6]
data_np = np.random.random(shape).astype(dtype)
beta_np = np.random.random(shape[axis]).astype(dtype)
gamma_np = np.random.random(shape[axis]).astype(dtype)
moving_mean_np = np.random.random(shape[axis]).astype(dtype)
moving_var_np = np.random.random(shape[axis]).astype(dtype)
data = relay.var("data", relay.TensorType(shape, dtype))
beta = relay.var("beta", relay.TensorType((shape[1],), dtype))
gamma = relay.var("gamma", relay.TensorType((shape[1],), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((shape[1],), dtype))
moving_var = relay.var("moving_var", relay.TensorType((shape[1],), dtype))
out = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=axis).astuple()
func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
out_const = relay.nn.batch_norm(
relay.const(data_np),
relay.const(gamma_np),
relay.const(beta_np),
relay.const(moving_mean_np),
relay.const(moving_var_np),
axis=axis,
).astuple()
func_const = relay.Function([], out_const)
mod_const = tvm.IRModule.from_expr(func_const)
mod_const = relay.transform.FoldConstant()(mod_const)
const_data_out = mod_const["main"].body[0].data
const_moving_mean_out = mod_const["main"].body[1].data
const_moving_var_out = mod_const["main"].body[2].data
vm_data_out, vm_moving_mean_out, vm_moving_var_out = relay.create_executor(
"vm", device=tvm.device("llvm"), target="llvm"
).evaluate(func)(data_np, gamma_np, beta_np, moving_mean_np, moving_var_np)
tvm.testing.assert_allclose(const_data_out.numpy(), vm_data_out.numpy())
tvm.testing.assert_allclose(const_moving_mean_out.numpy(), vm_moving_mean_out.numpy())
tvm.testing.assert_allclose(const_moving_var_out.numpy(), vm_moving_var_out.numpy())
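# FoldConstant evaluates batch_norm at compile time when all inputs are constants;
# the test checks that the folded tuple fields match a VM run of the same
# expression with variable inputs.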
@pytest.mark.xfail
def test_matmul_type_check():
dtype = "float16"
n, c, h, w = 2, 2, 2, 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
mismatch_w = 3
w = relay.var("w", relay.TensorType((mismatch_w, 2), dtype))
y = relay.nn.matmul(x, w)
yy = run_infer_type(y)
@tvm.testing.uses_gpu
def test_matmul(executor_kind):
for dtype in ["float16", "float32"]:
if dtype == "float16":
continue
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.TensorType((2, w), dtype))
y = relay.nn.matmul(x, w, units=2, transpose_b=True)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, w, h), dtype))
wh, ww = te.size_var("wh"), te.size_var("ww")
w = relay.var("w", relay.TensorType((wh, ww), dtype))
y = relay.nn.matmul(x, w, transpose_a=True)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.matmul(x, w, units=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
x = relay.var("x", shape=(5, 10), dtype=dtype)
w = relay.var("w", shape=(5, 2), dtype=dtype)
z = relay.nn.matmul(x, w, transpose_a=True)
func = relay.Function([x, w], z)
x_data = np.random.rand(5, 10).astype(dtype)
w_data = np.random.rand(5, 2).astype(dtype)
ref_res = np.dot(x_data.transpose(), w_data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, w_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@pytest.mark.xfail
def test_dense_type_check():
dtype = "float16"
n, c, h, w = 2, 2, 2, 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
mismatch_w = 3
w = relay.var("w", relay.TensorType((2, mismatch_w), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
@tvm.testing.uses_gpu
def test_dense(executor_kind):
for dtype in ["float16", "float32"]:
if dtype == "float16":
continue
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.TensorType((2, w), dtype))
y = relay.nn.dense(x, w, units=2)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
wh, ww = te.size_var("wh"), te.size_var("ww")
w = relay.var("w", relay.TensorType((ww, wh), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
m, k = 4, 2
x = relay.var("x", relay.TensorType((m, k), dtype))
k, nw = relay.Any(), 6
w = relay.var("w", relay.TensorType((k, n), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.dense(x, w, units=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
x = relay.var("x", shape=(10, 5), dtype=dtype)
w = relay.var("w", shape=(2, 5), dtype=dtype)
z = relay.nn.dense(x, w)
func = relay.Function([x, w], z)
x_data = np.random.rand(10, 5).astype(dtype)
w_data = np.random.rand(2, 5).astype(dtype)
ref_res = np.dot(x_data, w_data.T)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, w_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_dense_same_args_compile():
for dtype in ["float32", "int8"]:
x = relay.var("x", shape=(32, 64), dtype=dtype)
out_dtype = "int32" if dtype == "int8" else "float32"
f = relay.Function([x], relay.nn.dense(x, x, out_dtype=out_dtype))
m = tvm.IRModule.from_expr(f)
for target, _ in tvm.testing.enabled_targets():
tvm.relay.build(m, target=target)
def test_dense_dtype():
data_dtype = "uint8"
weight_dtype = "int8"
out_dtype = " |
uint8"
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype))
w = relay.var("w", relay.TensorType((2, w), weight_dtype))
y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)
assert run_infer_type(yy.args[0]).checked_type.dtype == "uint8"
assert run_infer_type(yy.args[1]).checked_type.dtype == "int8"
def test_bitserial_dense():
m, k = te.size_var("m"), te.size_var("k")
x = relay.var("x", relay.TensorType((m, k), "int16"))
w = relay.var("w", relay.TensorType((k, 32), "int16"))
y = relay.nn.bitserial_dense(x, w, units=32)
"units=8" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((m, 32), "int16")
@tvm.testing.requires_cascadelake
def test_dense_vnni():
data_shape = (32, 96)
weight_shape = (128, 96)
for data_dtype in ["uint8", "int8"]:
data = relay.var("data", shape=data_shape, dtype=data_dtype)
weight = relay.var("weight", shape=weight_shape, dtype="int8")
bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
dense = relay.nn.dense(data, weight, out_dtype="int32")
out = relay.nn.bias_add(dense, bias)
mod = tvm.IRModule.from_expr(out)
target = "llvm -mcpu=cascadelake"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.get_source("asm")
assert "vpdpbusd" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
runtime.set_input("data", a)
runtime.set_input("weight", b)
runtime.set_input("bias", c)
runtime.run()
out = runtime.get_output(0).numpy()
ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
np.testing.assert_equal(out, ref)
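# On a Cascade Lake target the int8 dense schedule is expected to use the VNNI
# vpdpbusd instruction, which is what the generated-assembly check above verifies.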
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_dense_rocm_sdot4():
data_shape = (32, 96)
weight_shape = (128, 96)
data_dtype = "int8"
data = relay.var("data", shape=data_shape, dtype=data_dtype)
weight = relay.var("weight", shape=weight_shape, dtype="int8")
bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
dense = relay.nn.dense(data, weight, out_dtype="int32")
out = relay.nn.bias_add(dense, bias)
mod = tvm.IRModule.from_expr(out)
target = "rocm -mattr=+dotprod"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.imported_modules[0].get_source("asm")
assert "v_dot4_i32_i8" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
runtime.set_input("data", a)
runtime.set_input("weight", b)
runtime.set_input("bias", c)
runtime.run()
out = runtime.get_output(0).numpy()
ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
np.testing.assert_equal(out, ref)
def test_extern_concat_injective_fuse():
mod = tvm.parser.fromtext(
"""
def @main(%p0844: Tensor[(1, 384), int64], %p1652: Tensor[(2016, 128), float16]) {
%1331 = cast(%p0844, dtype="int32");
%1332 = take(%p1652, %1331, axis=0);
%1333 = strided_slice(%1332, begin=[0, 1, 0], end=[1, 384, 128], strides=[1, 1, 1], axes=None);
%1334 = strided_slice(%1332, begin=[0, 0, 0], end=[1, -1, 128], strides=[1, 1, 1], axes=None);
%1335 = nn.pad(%1333, 0, pad_width=[[0, 0], [0, 1], [0, 0]]);
%1336 = nn.pad(%1334, 0, pad_width=[[0, 0], [1, 0], [0, 0]]);
%1337 = (%1335, %1332, %1336);
%1338 = concatenate(%1337, axis=2);
reshape(%1338, newshape=[-1, 384])
}
"""
)
relay.build(mod, params={}, target="llvm")
if __name__ == "__main__":
pytest.main([__file__])
""" Support level10 operator test cases.
""" |
import sys |
import pytest |
import numpy as np |
import tvm |
import tvm.testing |
import tvm.topi.testing
from tvm import relay, te, topi
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("graph", "vm")
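# relay.annotation.checkpoint marks a sub-expression for recomputation during the
# backward pass; it must not change the inferred type or the forward result.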
@tvm.testing.uses_gpu
def test_checkpoint(executor_kind):
dtype = "float32"
xs = [relay.var("x{}".format(i), dtype) for i in range(4)]
f = relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
f_checkpoint = relay.annotation.checkpoint(f)
func, func_checkpoint = relay.Function(xs, f), relay.Function(xs, f_checkpoint)
f, f_checkpoint = run_infer_type(func), run_infer_type(func_checkpoint)
assert f.checked_type == f_checkpoint.checked_type
inputs = [np.random.uniform() for _ in range(len(xs))]
for target, dev in tvm.testing.enabled_targets():
f_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(*inputs)
f_checkpoint_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
f_checkpoint
)(*inputs)
tvm.testing.assert_allclose(f_res.numpy(), f_checkpoint_res.numpy(), 0, 0)
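# The gradient of a checkpointed expression, after partial evaluation and dead code
# elimination, should be structurally equal to the hand-written reference program below.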
def test_checkpoint_alpha_equal():
xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
f = relay.Function(
xs,
relay.annotation.checkpoint(
relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
),
)
df = transform.gradient(run_infer_type(f))
with tvm.transform.PassContext(opt_level=3):
passes = [
transform.PartialEvaluate(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
]
mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
df = mod["main"]
df_parsed = tvm.parser.parse_expr(
"""
fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
%z: Tensor[(1), float32], %w: Tensor[(1), float32])
-> (Tensor[(1), float32],
(Tensor[(1), float32], Tensor[(1), float32],
Tensor[(1), float32], Tensor[(1), float32])) { |
%0 = add(%x, %y);
%1 = add(%z, %w);
let %x1: Tensor[(1), float32] = multiply(%0, %1);
let %x2: Tensor[(1), float32] = ones_like(%x1);
let %x3: Tensor[(1), float32] = add(%x, %y);
let %x4: Tensor[(1), float32] = add(%z, %w);
%2 = zeros_like(%x3);
%3 = multiply(%x2, %x4);
%4 = collapse_sum_like(%3, %x3);
let %x5: Tensor[(1), float32] = add(%2, %4);
%5 = zeros_like(%x4);
%6 = multiply(%x2, %x3);
%7 = collapse_sum_like(%6, %x4);
let %x6: Tensor[(1), float32] = add(%5, %7);
%8 = zeros_like(%x);
%9 = collapse_sum_like(%x5, %x);
%10 = add(%8, %9);
%11 = zeros_like(%y);
%12 = collapse_sum_like(%x5, %y);
%13 = add(%11, %12);
%14 = zeros_like(%z);
%15 = collapse_sum_like(%x6, %z);
%16 = add(%14, %15);
%17 = zeros_like(%w);
%18 = collapse_sum_like(%x6, %w);
%19 = add(%17, %18);
%20 = (%10, %13, %16, %19);
(%x1, %20)
}
"""
)
tvm.ir.assert_structural_equal(df, df_parsed)
def test_checkpoint_alpha_equal_tuple():
xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
f = relay.Function(
xs,
relay.annotation.checkpoint(
relay.Tuple([relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3])])
),
)
df = transform.gradient(run_infer_type(f))
with tvm.transform.PassContext(opt_level=3):
passes = [
transform.PartialEvaluate(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
]
mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
df = mod["main"]
df_parsed = tvm.parser.parse_expr(
"""
fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
            %z: Tensor[(1), float32], %w: Tensor[(1), float32])
-> ((Tensor[(1), float32], Tensor[(1), float32]),
(Tensor[(1), float32], Tensor[(1), float32],
Tensor[(1), float32], Tensor[(1), float32])) {
let %x1: Tensor[(1), float32] = add(%x, %y) /* ty=Tensor[(1), float32] */;
let %x2: Tensor[(1), float32] = add(%z, %w) /* ty=Tensor[(1), float32] */;
let %x3: Tensor[(1), float32] = zeros_like(%x2) /* ty=Tensor[(1), float32] */;
let %x4: Tensor[(1), float32] = ones_like(%x1) /* ty=Tensor[(1), float32] */;
%0 = (%x1, %x2);
%1 = zeros_like(%x) /* ty=Tensor[(1), float32] */;
%2 = collapse_sum_like(%x4, %x) /* ty=Tensor[(1), float32] */;
%3 = add(%1, %2) /* ty=Tensor[(1), float32] */;
%4 = zeros_like(%y) /* ty=Tensor[(1), float32] */;
%5 = collapse_sum_like(%x4, %y) /* ty=Tensor[(1), float32] */;
%6 = add(%4, %5) /* ty=Tensor[(1), float32] */;
%7 = zeros_like(%z) /* ty=Tensor[(1), float32] */;
%8 = collapse_sum_like(%x3, %z) /* ty=Tensor[(1), float32] */;
%9 = add(%7, %8) /* ty=Tensor[(1), float32] */;
%10 = zeros_like(%w) /* ty=Tensor[(1), float32] */;
%11 = collapse_sum_like(%x3, %w) /* ty=Tensor[(1), float32] */;
%12 = add(%10, %11) /* ty=Tensor[(1), float32] */;
%13 = (%3, %6, %9, %12);
(%0, %13)
}
"""
)
tvm.ir.assert_structural_equal(df, df_parsed)
@tvm.testing.uses_gpu
def test_collapse_sum_like(executor_kind):
shape = (3, 4, 5, 6)
shape_like = (4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
z = relay.collapse_sum_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
func = relay.Function([x, y], z)
x = np.random.uniform(size=shape).astype(dtype)
y = np.random.uniform(size=shape_like).astype(dtype)
ref_res = np.sum(x, 0)
    for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x, y
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_collapse_sum_to(executor_kind):
shape = (3, 4, 5, 6)
shape_to = (4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
z = relay.collapse_sum_to(x, shape_to)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_to, dtype)
func = relay.Function([x], z)
x = np.random.uniform(size=shape).astype(dtype)
ref_res = np.sum(x, 0)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_broadcast_to(executor_kind):
shape = (4, 1, 6)
shape_like = (3, 4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
z = relay.broadcast_to(x, shape=shape_like)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
func = relay.Function([x], z)
x = np.random.uniform(size=shape).astype(dtype)
ref_res = np.broadcast_to(x, shape_like)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_broadcast_to_const_shape_int64(executor_kind):
shape_like = relay.const(np.array([1, 5]), dtype="int64")
x = relay.var("x", shape=(1,), dtype="int64")
z = relay.broadcast_to(x, shape=shape_like)
z = relay.sum(z, axis=0)
f = relay.Function([x], z)
x = np.random.randint(10, size=(1,), dtype="int64")
ref_res = np.broadcast_to(x, (5,))
    for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
def test_broadcast_concat_shape_int64(executor_kind):
x_shape = (1, 2, 1, 1)
broadcast_shape = [1, 2, 2, 1]
x = relay.var("data", relay.TensorType(x_shape, "float32"))
broadcast_to = relay.op.broadcast_to(x, relay.const(broadcast_shape, dtype="int64"))
concate = relay.op.concatenate((broadcast_to,), axis=0)
f = relay.Function([x], concate)
x = np.zeros(x_shape).astype("float32")
ref_res = np.concatenate((np.broadcast_to(x, broadcast_shape),), axis=0)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
def test_broadcast_pool2d_shape_int64(executor_kind):
x_shape = (1, 3, 32, 32)
out_shape = (2, 3, 32, 32)
x = relay.var("data", shape=x_shape, dtype="float32")
broadcast_to = relay.broadcast_to(x, shape=relay.const([2, 3, 32, 32], dtype="int64"))
pool2d = relay.nn.max_pool2d(broadcast_to, pool_size=(3, 3), padding=(1, 1, 1, 1))
sub = relay.subtract(broadcast_to, pool2d)
f = relay.Function([x], sub)
x = np.ones(x_shape).astype("float32")
ref_res = np.zeros(out_shape).astype("float32")
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_broadcast_to_like(executor_kind):
shape = (4, 1, 6)
shape_like = (3, 4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
z = relay.broadcast_to_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
func = relay.Function([x, y], z) |
x = np.random.uniform(size=shape).astype(dtype)
y = np.random.uniform(size=shape_like).astype(dtype)
ref_res = np.broadcast_to(x, shape_like)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x, y
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
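# NumPy reference for relay.slice_like: slice np_data so that the selected axes
# (or all leading axes when axis is None) match the shape of np_shape_like.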
def np_slice_like(np_data, np_shape_like, axis=None):
begin_idx = [0 for _ in np_data.shape]
end_idx = list(np_data.shape)
if axis:
for i in axis:
if i < 0:
i = len(np_data.shape) + i
end_idx[i] = np_shape_like.shape[i]
else:
for i in range(len(np_data.shape)):
if i < len(np_shape_like.shape):
end_idx[i] = np_shape_like.shape[i]
slice_idx = []
for b, e in zip(begin_idx, end_idx):
slice_idx.append(slice(b, e))
np_result = np_data[tuple(slice_idx)]
return np_result
def verify_slice_like(executor_kind, data, slice_like, axes, output, dtype="float32"):
x = relay.var("data", relay.TensorType(data, dtype))
y = relay.var("slice_like", relay.TensorType(slice_like, dtype))
z = relay.slice_like(x, y, axes)
zz = run_infer_type(z)
if axes:
assert "axes" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
    # Skip execution when either shape is fully symbolic; only type inference is checked above.
    if all(not isinstance(v, int) for v in data) or all(
        not isinstance(v, int) for v in slice_like
    ):
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=data).astype(dtype)
y_data = np.random.uniform(size=slice_like).astype(dtype)
ref_res = np_slice_like(x_data, y_data, axes)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_slice_like(executor_kind):
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
verify_slice_like(
executor_kind, data=(d1, d2, d3), slice_like=(1, 2, 3), axes=None, output=(1, 2, 3)
)
verify_slice_like(
executor_kind, data=(1, 2, 3), slice_like=(d1, d2, d3), axes=None, output=(d1, d2, d3)
)
verify_slice_like(
executor_kind, data=(d2, d3, d4), slice_like=(d1, d2, d3), axes=(1, 2), output=(d2, d2, d3)
)
verify_slice_like(
executor_kind, data=(3, 4, 5), slice_like=(1, 2, 3), axes=None, output=(1, 2, 3)
)
verify_slice_like(executor_kind, data=(3, 4, 5), slice_like=(1, 2), axes=None, output=(1, 2, 5))
verify_slice_like(
executor_kind, data=(3, 4, 5), slice_like=(1, 2, 3), axes=(1, 2), output=(3, 2, 3)
)
verify_slice_like(
executor_kind, data=(3, 4, 5), slice_like=(1, 2, 3), axes=(-1, -3), output=(1, 4, 3)
)
verify_slice_like(
executor_kind,
data=(1, 3, 224, 224),
slice_like=(1, 3, 112, 112),
axes=(2, 3),
output=(1, 3, 112, 112),
)
@tvm.testing.uses_gpu
def test_reverse_reshape(executor_kind):
def verify_reverse_reshape(executor_kind, shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reverse_reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_reverse_reshape(executor_kind, (2, 3, 4), (4, 0, 2), (4, 3, 2))
    verify_reverse_reshape(executor_kind, (2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reverse_reshape(executor_kind, (2, 3, 4), (0, -1), (3, 8))
verify_reverse_reshape(executor_kind, (2, 3, 4), (-1, 0), (6, 4))
verify_reverse_reshape(executor_kind, (2, 3, 4), (0, -3), (2, 12))
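# Helpers for batch_matmul: build the relay expression, check the inferred shape,
# then compare against tvm.topi.testing.batch_matmul on every enabled target.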
def verify_batch_matmul_with_inputs(
executor_kind, x, y, x_np, y_np, out_shape, dtype="float32", trans_x=False, trans_y=True
):
z = relay.nn.batch_matmul(x, y, transpose_a=trans_x, transpose_b=trans_y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
input_vars = relay.analysis.free_vars(z)
func = relay.Function(input_vars, z)
z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=trans_x, trans_y=trans_y)
for target, dev in tvm.testing.enabled_targets():
if len(input_vars) == 2:
z = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_np, y_np
)
else:
z = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_np)
tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5, atol=1e-5)
def verify_batch_matmul(
executor_kind, x_shape, y_shape, out_shape, dtype="float32", trans_x=False, trans_y=True
):
x = relay.var("x", relay.TensorType(x_shape, dtype))
y = relay.var("y", relay.TensorType(y_shape, dtype))
x_np = np.random.uniform(size=x_shape).astype(dtype)
y_np = np.random.uniform(size=y_shape).astype(dtype)
verify_batch_matmul_with_inputs(
executor_kind, x, y, x_np, y_np, out_shape, dtype, trans_x, trans_y
)
@tvm.testing.uses_gpu
def test_batch_matmul(executor_kind):
b, m, n, k = te.size_var("b"), te.size_var("m"), te.size_var("n"), te.size_var("k")
x = relay.var("x", relay.TensorType((b, m, k), "float32"))
y = relay.var("y", relay.TensorType((b, n, k), "float32"))
z = relay.nn.batch_matmul(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((b, m, n), "float32")
verify_batch_matmul( |
executor_kind, (1, 16, 32), (1, 16, 32), (1, 16, 16), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (5, 16, 32), (5, 16, 32), (5, 16, 16), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (5, 16, 32), (5, 20, 32), (5, 16, 20), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (30, 16, 32), (30, 20, 32), (30, 16, 20), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (1, 32, 16), (1, 16, 32), (1, 16, 16), trans_x=True, trans_y=True
)
verify_batch_matmul(
executor_kind, (5, 16, 32), (5, 32, 16), (5, 16, 16), trans_x=False, trans_y=False
)
verify_batch_matmul(
executor_kind, (5, 32, 16), (5, 32, 20), (5, 16, 20), trans_x=True, trans_y=False
)
x_np = np.random.randn(10, 27, 64).astype("float32")
x = relay.var("x", shape=x_np.shape)
verify_batch_matmul_with_inputs(executor_kind, x, x, x_np, x_np, (10, 27, 27))
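# int8 batch_matmul on cascadelake should also lower to vpdpbusd; the output is
# compared exactly against the topi reference plus the int32 bias tensor.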
@tvm.testing.requires_cascadelake
def test_batch_matmul_vnni():
x_shape = (16, 32, 96)
y_shape = (16, 128, 96)
z_shape = (16, 32, 128)
for lhs_dtype in ["uint8", "int8"]:
x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
y = relay.var("y", shape=y_shape, dtype="int8")
z = relay.var("z", shape=z_shape, dtype="int32")
bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
out = bmm + z
mod = tvm.IRModule.from_expr(out)
target = "llvm -mcpu=cascadelake"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.get_source("asm")
assert "vpdpbusd" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
z_np = np.random.uniform(1, 10, size=z_shape).astype("int32")
        runtime.set_input("x", x_np)
runtime.set_input("y", y_np)
runtime.set_input("z", z_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32") + z_np
np.testing.assert_equal(out, ref)
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_batch_matmul_rocm_sdot4():
x_shape = (16, 32, 96)
y_shape = (16, 128, 96)
lhs_dtype = "int8"
x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
y = relay.var("y", shape=y_shape, dtype="int8")
bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
mod = tvm.IRModule.from_expr(bmm)
target = "rocm -mattr=+dotprod"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.imported_modules[0].get_source("asm")
assert "v_dot4_i32_i8" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
runtime.set_input("x", x_np)
runtime.set_input("y", y_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32")
np.testing.assert_equal(out, ref)
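# shape_of returns the runtime shape of its input as an int32 tensor; only the vm
# executor is exercised here.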
@tvm.testing.uses_gpu
def test_shape_of():
shape = (10, 5, 12)
x = relay.var("x", shape=shape)
func = relay.Function([x], relay.op.shape_of(x))
func = run_infer_type(func)
x_data = np.random.rand(*shape).astype("float32")
for target, dev in tvm.testing.enabled_targets():
for kind in ["vm"]:
op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.numpy(), np.array(shape).astype("int32"))
@tvm.testing.uses_gpu
def test_ndarray_size(executor_kind):
def verify_ndarray_size(shape):
x = relay.var("x", shape=shape)
        func = relay.Function([x], relay.op.ndarray_size(x))
func = run_infer_type(func)
x_data = np.random.uniform(size=shape).astype("float32")
ref_res = np.size(x_data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify_ndarray_size((2, 3, 5))
verify_ndarray_size((2, 3, 5, 7))
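# Adaptive pooling helpers: run each op with both int32 and int64 shape dtypes and
# compare against tvm.topi.testing.adaptive_pool on every enabled target.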
def verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc):
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
y = opfunc(x, out_size, layout)
func = relay.Function([x], y)
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
for target, dev in tvm.testing.enabled_targets():
relay_out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
np_data
)
tvm.testing.assert_allclose(relay_out.numpy(), np_out, rtol=1e-5, atol=1e-5)
def verify_adaptive_pool1d(dshape, out_size, pool_type, layout="NCW", dtype="float32"):
opfunc = relay.nn.adaptive_avg_pool1d if pool_type == "avg" else relay.nn.adaptive_max_pool1d
verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
def verify_adaptive_pool3d(dshape, out_size, pool_type, layout="NCDHW", dtype="float32"):
opfunc = relay.nn.adaptive_avg_pool3d if pool_type == "avg" else relay.nn.adaptive_max_pool3d
    verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)