import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor
from tvm.runtime.vm import VirtualMachine
from tvm.topi.nn.qnn import SQNN_DTYPE_TO_CODE
def allclose_with_rounding(a, b):
mismatch = a != b
assert np.sum(mismatch) <= 3
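# Note: the comparison tolerates up to three mismatching elements, presumably to
# allow off-by-one differences between the simulated (float) and real (integer)
# quantize kernels on values that land exactly on a rounding boundary.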
def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data):
shape = in_data.shape
input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
output_zero_point = relay.const(quant_args["out_zero_point"])
output_scale = relay.const(quant_args["out_scale"])
quantized_output = relay.qnn.op.quantize(
input_data,
output_scale=output_scale,
output_zero_point=output_zero_point,
axis=axis,
out_dtype=out_dtype,
)
mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
mod = tvm.IRModule.from_expr(mod)
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(mod, "llvm", params=None)
rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
rt_mod.set_input(input_data=in_data)
rt_mod.set_input(**params)
rt_mod.run()
res = rt_mod.get_output(0).numpy()
return res
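# quantize_test_driver above is the reference path: it lowers relay.qnn.op.quantize
# through the normal graph-executor build on LLVM and returns the integer result
# that the simulated op is compared against below.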
def build_simulated_quantize(input_data, scale, zp, dtype, axis=-1):
sim_q = relay.qnn.op.simulated_quantize(
input_data,
scale,
zp,
axis=axis,
out_dtype=dtype,
)
mod = tvm.IRModule.from_expr(sim_q)
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, "llvm", params=None)
vm = VirtualMachine(vm_exec, tvm.cpu(0))
return vm
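# The simulated path is compiled with the Relay VM rather than the graph executor
# because scale, zero point and output dtype are runtime arguments here. As an aid,
# a minimal NumPy sketch of the affine quantization both paths compute (illustrative
# only, not used by the tests; the helper name and qmin/qmax defaults are assumptions):
def _np_quantize_sketch(x, scale, zp, qmin=0, qmax=255):
    # q = clip(round(x / scale) + zp, qmin, qmax)
    return np.clip(np.round(x / scale) + zp, qmin, qmax)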
def verify_simulated_quantize_simple(dtype):
data = np.random.uniform(low=-128, high=127, size=[2, 5]).astype("float32")
scale_np = np.float32(0.5)
zp_np = np.int32(127)
dtype_np = np.int32(SQNN_DTYPE_TO_CODE[dtype])
quant_args = {"out_zero_point": zp_np, "out_scale": scale_np}
q_out = quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype=dtype,
in_data=data,
)
input_data = relay.var("input_data", shape=data.shape, dtype="float32")
scale = relay.var("scale", shape=[])
zp = relay.var("zp", shape=[], dtype="int32")
dtype = relay.var("dtype", shape=[], dtype="int32")
vm = build_simulated_quantize(input_data, scale, zp, dtype)
sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
allclose_with_rounding(sim_q_out.numpy(), q_out)
def test_simulated_quantize():
verify_simulated_quantize_simple("uint8")
verify_simulated_quantize_simple("int8")
verify_simulated_quantize_simple("int32")
def test_dynamic_channels():
data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("float32")
scale_np = np.asarray([0.5]).astype("float32")
zp_np = np.asarray([127]).astype("int32")
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
quant_args = {"out_zero_point": zp_np[0], "out_scale": scale_np[0]}
q_out = quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=0,
out_dtype="uint8",
in_data=data,
)
input_data = relay.var("input_data", shape=data.shape, dtype="float32")
scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
dtype = relay.var("dtype", shape=[], dtype="int32")
vm = build_simulated_quantize(input_data, scale, zp, dtype, axis=0)
sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
allclose_with_rounding(sim_q_out.numpy(), q_out)
scale_np = np.array([0.5, 0.25]).astype("float32")
zp_np = np.array([127, 123]).astype("int32")
quant_args = {"out_zero_point": zp_np, "out_scale": scale_np}
q_out = quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=0,
out_dtype="uint8",
in_data=data,
)
sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
allclose_with_rounding(sim_q_out.numpy(), q_out)
def test_dynamic_dtype():
data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("float32")
scale_np = np.asarray([0.5]).astype("float32")
zp_np = np.asarray([127]).astype("int32")
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
quant_args = {"out_zero_point": zp_np[0], "out_scale": scale_np[0]}
q_out = quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="uint8",
in_data=data,
)
input_data = relay.var("input_data", shape=data.shape, dtype="float32")
scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
dtype = relay.var("dtype", shape=[], dtype="int32")
vm = build_simulated_quantize(input_data, scale, zp, dtype)
sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
allclose_with_rounding(sim_q_out.numpy(), q_out)
q_out = quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="int32",
in_data=data,
)
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int32"])
sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
allclose_with_rounding(sim_q_out.numpy(), q_out)
if __name__ == "__main__":
test_simulated_quantize()
test_dynamic_channels()
test_dynamic_dtype()
import tvm
import numpy as np
from tvm import relay
def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype="uint8"):
assert len(x_datas) == len(y_datas)
assert len(y_datas) == len(golden_outputs)
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
lhs_scale = relay.const(scale_and_zp["lhs_scale"], "float32")
lhs_zp = relay.const(scale_and_zp["lhs_zp"], "int32")
rhs_scale = relay.const(scale_and_zp["rhs_scale"], "float32")
rhs_zp = relay.const(scale_and_zp["rhs_zp"], "int32")
output_scale = relay.const(scale_and_zp["output_scale"], "float32")
output_zp = relay.const(scale_and_zp["output_zp"], "int32")
z = relay.qnn.op.subtract(
lhs=x,
rhs=y,
lhs_scale=lhs_scale,
lhs_zero_point=lhs_zp,
rhs_scale=rhs_scale,
rhs_zero_point=rhs_zp,
output_scale=output_scale,
output_zero_point=output_zp,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
for i in range(0, len(x_datas)):
x_data = x_datas[i]
y_data = y_datas[i]
golden_output = golden_outputs[i]
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
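# For reference, a hedged NumPy sketch (not part of the original test; the helper
# name is hypothetical) of the TFLite-style requantized subtract that produces the
# golden outputs below for uint8 data:
#   real = lhs_scale * (x - lhs_zp) - rhs_scale * (y - rhs_zp)
#   q    = clip(round(real / output_scale) + output_zp, 0, 255)
def _reference_qnn_subtract(x, y, p):
    real = p["lhs_scale"] * (x.astype("float32") - p["lhs_zp"]) - p["rhs_scale"] * (
        y.astype("float32") - p["rhs_zp"]
    )
    return np.clip(np.round(real / p["output_scale"]) + p["output_zp"], 0, 255).astype("uint8")
# e.g. inside a test, _reference_qnn_subtract(x_datas[0], y_datas[0], scale_and_zp)
# should reproduce golden_outputs[0].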
def test_tflite_same_io_qnn_params():
scale_and_zp = {
"lhs_scale": 0.00784314,
"lhs_zp": 127,
"rhs_scale": 0.00784314,
"rhs_zp": 127,
"output_scale": 0.00784314,
"output_zp": 127,
}
x_datas = [
np.array((140, 153, 165, 178)).reshape((1, 4)),
np.array((25, 153, 178, 216)).reshape((1, 4)),
np.array((25, 153, 216, 165)).reshape((1, 4)),
]
y_datas = [
np.array((204, 178, 165, 140)).reshape((1, 4)),
np.array((204, 178, 191, 25)).reshape((1, 4)),
np.array((204, 178, 25, 191)).reshape((1, 4)),
]
golden_outputs = [
np.array((63, 102, 127, 165)).reshape((1, 4)),
np.array((0, 102, 114, 255)).reshape((1, 4)),
np.array((0, 102, 255, 101)).reshape((1, 4)),
]
qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_tflite_different_io_qnn_params():
scale_and_zp = {
"lhs_scale": 0.0156863,
"lhs_zp": 127,
"rhs_scale": 0.0117647,
"rhs_zp": 85,
"output_scale": 0.0235294,
"output_zp": 128,
}
x_datas = [
np.array((76, 140, 153, 172)).reshape((1, 4)),
np.array((133, 140, 146, 153)).reshape((1, 4)),
np.array((76, 140, 172, 146)).reshape((1, 4)),
]
y_datas = [
np.array((136, 119, 128, 17)).reshape((1, 4)),
np.array((136, 119, 111, 94)).reshape((1, 4)),
np.array((136, 119, 17, 128)).reshape((1, 4)),
]
golden_outputs = [
np.array((68, 120, 123, 192)).reshape((1, 4)),
np.array((106, 120, 128, 140)).reshape((1, 4)),
np.array((68, 120, 192, 119)).reshape((1, 4)),
]
qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_saturation():
scale_and_zp = {
"lhs_scale": 0.125,
"lhs_zp": 0,
"rhs_scale": 0.125,
"rhs_zp": 0,
"output_scale": 0.125,
"output_zp": 0,
}
x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]
y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]
golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]
qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
scale_and_zp = {
"lhs_scale": 0.125,
"lhs_zp": 0,
"rhs_scale": 0.125,
"rhs_zp": 0,
"output_scale": 0.25,
"output_zp": 0,
}
x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]
y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]
golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]
qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
scale_and_zp = {
"lhs_scale": 0.5,
"lhs_zp": 0,
"rhs_scale": 0.25,
"rhs_zp": 0,
"output_scale": 0.125,
"output_zp": 0,
}
x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]
y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]
golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]
qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
if __name__ == "__main__":
test_tflite_same_io_qnn_params()
test_tflite_different_io_qnn_params()
test_saturation()
from typing import Callable, List
import numpy as np
import pytest
import scipy.special
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.qnn.op.legalizations import hardswish_func
def dequantize(data, scale, zp):
return scale * (np.asarray(data) - zp)
def generate_golden_output(
floating_point_golden_func, dequantized_x, output_scale, output_zero_point, dtype
):
output = floating_point_golden_func(dequantized_x)
output = np.around(output / output_scale + output_zero_point)
np_dtype = {"int8": np.int8, "uint8": np.uint8}[dtype]
q_min = np.iinfo(np_dtype).min
q_max = np.iinfo(np_dtype).max
return np.clip(output, q_min, q_max)
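# The golden model follows the usual QNN legalization recipe: dequantize the input
# with its scale/zero point, apply the floating-point op, requantize with the
# output scale/zero point, and clip to the integer range of the output dtype.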
def run_qnn_func(func: relay.Function, args: List[relay.Expr]):
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.Legalize()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(*args)
return op_res.numpy()
def create_qnn_func(
qnn_op: Callable[[relay.Expr, relay.Expr, relay.Expr, relay.Expr, relay.Expr], relay.Call],
x_data: np.ndarray,
input_scale: float,
input_zero_point: int,
output_scale: float,
output_zero_point: int,
input_dtype: str = "uint8",
):
x = relay.var("x", shape=x_data.shape, dtype=input_dtype)
y = qnn_op(
x=x,
scale=relay.const(input_scale, "float32"),
zero_point=relay.const(input_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
return relay.Function([x], y)
def run_condition(
qnn_op: Callable[[relay.Expr, relay.Expr, relay.Expr, relay.Expr, relay.Expr], relay.Call],
floating_point_golden_func: Callable[[np.ndarray], np.ndarray],
x_data: np.ndarray,
input_scale: float,
input_zero_point: int,
output_scale: float,
output_zero_point: int,
input_dtype: str = "uint8",
):
func = create_qnn_func(
qnn_op,
x_data,
input_scale=input_scale,
input_zero_point=input_zero_point,
output_scale=output_scale,
output_zero_point=output_zero_point,
input_dtype=input_dtype,
)
x_dequantized = dequantize(x_data, input_scale, input_zero_point)
golden_output = generate_golden_output(
floating_point_golden_func,
x_dequantized,
output_scale,
output_zero_point,
dtype=input_dtype,
)
op_res = run_qnn_func(func, [x_data])
np.testing.assert_equal(op_res, golden_output.astype(input_dtype))
def generic_test(
qnn_op: Callable[[relay.Expr, relay.Expr, relay.Expr, relay.Expr, relay.Expr], relay.Call],
floating_point_golden_func: Callable[[np.ndarray], np.ndarray],
input_dtype: str = "uint8",
x_data: np.ndarray = np.arange(0, 256, dtype="uint8"),
):
x_data = x_data.view(input_dtype)
return run_condition(
qnn_op,
floating_point_golden_func,
x_data,
input_scale=0.125,
input_zero_point=0,
output_scale=0.125,
output_zero_point=0,
input_dtype=input_dtype,
)
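# generic_test sweeps every representable value of the input dtype (the default
# x_data covers byte values 0..255, reinterpreted via .view for int8), using a
# fixed scale of 0.125 and a zero point of 0; the rsqrt and sqrt int8 tests instead
# pass 1..127 to avoid negative inputs.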
class TestRSqrt:
def test_saturation(self):
x_data = np.array((255, 133, 0, 9)).reshape((1, 4))
run_condition(
relay.qnn.op.rsqrt,
lambda x: 1 / np.sqrt(x),
x_data,
input_scale=0.125,
input_zero_point=0,
output_scale=0.125,
output_zero_point=0,
input_dtype="uint8",
)
run_condition(
relay.qnn.op.rsqrt,
lambda x: 1 / np.sqrt(x),
x_data,
input_scale=0.125,
input_zero_point=0,
output_scale=0.25,
output_zero_point=0,
input_dtype="uint8",
)
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.rsqrt, lambda x: 1 / np.sqrt(x), input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(
relay.qnn.op.rsqrt,
lambda x: 1 / np.sqrt(x),
input_dtype="int8",
x_data=np.arange(1, 128, dtype="int8"),
)
class TestSqrt:
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.sqrt, np.sqrt, input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(
relay.qnn.op.sqrt,
np.sqrt,
input_dtype="int8",
x_data=np.arange(1, 128, dtype="int8"),
)
class TestExp:
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.exp, np.exp, input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(relay.qnn.op.exp, np.exp, input_dtype="int8")
class TestTanh:
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.tanh, np.tanh, input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(relay.qnn.op.tanh, np.tanh, input_dtype="int8")
class TestErf:
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.erf, scipy.special.erf, input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(relay.qnn.op.erf, scipy.special.erf, input_dtype="int8")
class TestSigmoid:
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.sigmoid, lambda x: 1 / (1 + np.exp(-x)), input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(relay.qnn.op.sigmoid, lambda x: 1 / (1 + np.exp(-x)), input_dtype="int8")
class TestHardswish:
def test_all_numbers_uint8(self):
generic_test(relay.qnn.op.hardswish, hardswish_func, input_dtype="uint8")
def test_all_numbers_int8(self):
generic_test(relay.qnn.op.hardswish, hardswish_func, input_dtype="int8")
if __name__ == "__main__":
tvm.testing.main()
import os
import numpy as np
import tvm
import tvm.testing
from tvm import te, runtime
import json
import base64
from tvm._ffi.base import py_str
from tvm.relay.op import add
from tvm import relay
from tvm import rpc
from tvm.contrib import utils, graph_executor
def test_save_load():
x = np.ones((10, 2)).astype("float32")
y = np.ones((1, 2, 3)).astype("float32")
params = {"x": x, "y": y}
param_bytes = runtime.save_param_dict(params)
assert isinstance(param_bytes, bytearray)
param2 = relay.load_param_dict(param_bytes)
assert len(param2) == 2
np.testing.assert_equal(param2["x"].numpy(), x)
np.testing.assert_equal(param2["y"].numpy(), y)
def test_ndarray_reflection():
np_array = np.random.uniform(size=(10, 2)).astype("float32")
tvm_array = tvm.nd.array(np_array)
param_dict = {"x": tvm_array, "y": tvm_array}
assert param_dict["x"].same_as(param_dict["y"])
deser_param_dict = relay.load_param_dict(runtime.save_param_dict(param_dict))
np.testing.assert_equal(deser_param_dict["x"].numpy(), tvm_array.numpy())
np.testing.assert_equal(deser_param_dict["x"].numpy(), deser_param_dict["y"].numpy())
def test_bigendian_rpc_param():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_graph_executor(remote, target, shape, dtype):
x = relay.var("x")
y = relay.const(1)
z = relay.add(x, y)
func = relay.Function([x], z)
x_in = np.ones(shape).astype(dtype)
params = {"x": x_in}
graph, lib, params = relay.build(func, target=target, params=params)
temp = utils.tempdir()
path_dso = temp.relpath("dev_lib.o")
lib.save(path_dso)
remote.upload(path_dso)
lib = remote.load_module("dev_lib.o")
dev = remote.cpu(0)
mod = graph_executor.create(graph, lib, dev)
mod.load_params(runtime.save_param_dict(params))
mod.run()
out = mod.get_output(0, tvm.nd.empty(shape, dtype=dtype, device=dev))
tvm.testing.assert_allclose(x_in + 1, out.numpy())
print("Test RPC conn |
ection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_graph_executor(remote, target, (10,), dtype)
if __name__ == "__main__":
test_save_load()
test_ndarray_reflection()
test_bigendian_rpc_param()
"""Test alter op layout pass"""
import pytest
import tvm
from tvm import relay, topi
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.testing import run_infer_type
import numpy as np
import tvm.testing
from tvm.relay import testing
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
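# run_opt_pass wraps the expression in an IRModule, runs the given pass (or pass
# list) at opt_level=3, and returns the resulting "main" function, or only its
# body when the input was a bare expression rather than a relay.Function.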
def test_alter_op():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_return_none():
"""Test doing nothing by returning 'None'"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.nn.global_max_pool2d(x)
y = relay.Function([x], y)
return y
called = [False]
def alter_conv2d(attrs, inputs, tinfos, out_type):
called[0] = True
return None
with TempOpAttr("nn.globa |
l_max_pool2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(before(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
assert called[0]
def test_alter_layout():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias")
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.bias_add(y, bias)
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2))
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
new_attrs["kernel_layout"] = "OIHW16i"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.layout_transform(x, "NCHW", "NCHW16c")
w = relay.layout_transform(weight, "OIHW", "OIHW16i")
y = relay.nn.conv2d(
y,
w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
kernel_layout="OIHW16i",
data_layout="NCHW16c",
)
b = relay.expand_dims(bias, axis=1, num_newaxis=2)
b = relay.expand_dims(b, axis=0, num_newaxis=1)
b = relay.layout_transform(b, "NCHW", "NCHW16c")
y = relay.add(y, b)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW16c")
y = relay.cast(y, "int32")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_multi():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=128, kernel_size=(3, 3), padding=(1, 1))
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
new_attrs["kernel_layout"] = "OHWI16i64o2i"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(128, 64, 3, 3))
y = relay.layout_transform(x, "NCHW", "NCHW16c")
w = relay.layout_transform(weight, "OIHW", "OHWI16i64o2i")
y = relay.nn.conv2d(
y,
w,
channels=128,
kernel_size=(3, 3),
padding=(1, 1),
kernel_layout="OHWI16i64o2i",
data_layout="NCHW16c",
)
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_lrn():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias")
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.max_pool2d(y, pool_size=(2, 2))
y = relay.nn.lrn(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
new_attrs["kernel_layout"] = "OIHW16i"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.layout_transform(x, "NCHW", "NCHW16c")
w = relay.layout_transform(weight, "OIHW", "OIHW16i")
y = relay.nn.conv2d(
y,
w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
kernel_layout="OIHW16i",
data_layout="NCHW16c",
)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW16c")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.nn.lrn(y)
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_dual_path():
"""
Test alternating the layout with two outputs.
One path continues to use the new layout while the other falls back to the old layout.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1))
y1 = relay.nn.relu(y1)
y2 = relay.nn.batch_flatten(y)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(
y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y1 = relay.nn.relu(y1)
y1 = relay.layout_transform(y1, "NCHW16c", "NCHW")
y2 = relay.layout_transform(y, "NCHW16c", "NCHW")
y2 = relay.nn.batch_flatten(y2)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_resnet():
"""Test alternating the layout of a residual block
This also tests the elimination of duplicated transformations.
If the same transformation is applied to the same node twice, only one transformation will be created.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y)
return relay.Function(analysis.free_vars(y), y)
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
x = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1), data_layout="NCHW16c")
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y, layout="NCHW16c")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
return relay.Function(analysis.free_vars(y), y)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_broadcast_op():
"""Test boradcast operators"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
scale = relay.var("scale", sh |
ape=(64, 1, 1))
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.bias_add(y, bias)
y = relay.multiply(scale, y)
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
scale = relay.var("scale", shape=(64, 1, 1))
weight = relay.var("weight")
x = relay.layout_transform(x, "NCHW", "NCHW16c")
bias = relay.expand_dims(bias, 1, 2)
bias = relay.expand_dims(bias, 0, 1)
bias = relay.layout_transform(bias, "NCHW", "NCHW16c")
scale = relay.expand_dims(scale, 0, 1)
scale = relay.layout_transform(scale, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.add(y, bias)
y = relay.multiply(scale, y)
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_broadcast_scalar_op():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 500, 500, 64))
kernel = relay.var("kernel", shape=(3, 3, 64, 64), dtype="float32")
bias = relay.var("bias", shape |
=(64,))
multiplier1 = relay.var("multiplier1", shape=(1,), dtype="float32")
multiplier2 = relay.var("multiplier2", shape=(1, 1), dtype="float32")
y = relay.nn.conv2d(x, kernel, data_layout="NHWC", kernel_layout="HWIO", kernel_size=(3, 3))
y = relay.add(bias, y)
y = relay.nn.relu(y)
y = relay.multiply(multiplier1, y)
y = relay.multiply(y, multiplier2)
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 500, 500, 64))
kernel = relay.var("kernel", shape=(3, 3, 64, 64), dtype="float32")
bias = relay.var("bias", shape=(64,))
multiplier1 = relay.var("multiplier1", shape=(1,), dtype="float32")
multiplier2 = relay.var("multiplier2", shape=(1, 1), dtype="float32")
b = relay.expand_dims(bias, axis=0, num_newaxis=3)
b = relay.layout_transform(b, "NHWC", "NCHW16c")
y = relay.layout_transform(x, "NHWC", "NCHW16c")
y = relay.nn.conv2d(
y, kernel, data_layout="NCHW16c", kernel_layout="HWIO", kernel_size=(3, 3)
)
y = relay.add(b, y)
y = relay.nn.relu(y)
y = relay.multiply(multiplier1, y)
y = relay.multiply(y, multiplier2)
y = relay.layout_transform(y, "NCHW16c", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_scalar():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.add(y, relay.const(1, "float32"))
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
w = relay.var("weight")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, w, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.add(y, relay.const(1.0, "float32"))
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_scalar_regression():
"""regression test where scalar fails"""
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 16))
bias = relay.var("bias", shape=(1, 1, 1, 16))
y = relay.nn.conv2d(
x,
weight,
channels=16,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.add(y, bias)
mean = relay.mean(y, axis=3, exclude=True)
var = relay.variance(y, axis=3, exclude=True)
gamma = relay.var("gamma")
beta = relay.var("beta")
y = relay.nn.batch_norm(y, gamma, beta, mean, var, axis=3)
y = y[0]
return relay.Function(analysis.free_vars(y), y)
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 16))
bias = relay.var("bias", shape=(1, 1, 1, 16))
x = relay.layout_transform(x, src_layout="NHWC", dst_layout="NCHW")
x = relay.layout_transform(x, src_layout="NCHW", dst_layout="NCHW16c")
weight = relay.layout_transform(weight, src_layout="HWIO", dst_layout="OIHW")
y = relay.nn.conv2d(
x, weight, channels=16, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
bias = relay.layout_transform(bias, src_layout="NHWC", dst_layout="NCHW")
bias = relay.layout_transform(bias, src_layout="NCHW", dst_layout="NCHW16c")
add = relay.add(y, bias)
mean = relay.mean(add, axis=[1, 4], exclude=True)
var = relay.variance(add, axis=[1, 4], exclude=True)
denom = relay.const(1.0) / relay.sqrt(var + relay.const(1e-05))
gamma = relay.var("gamma", shape=(16,))
denom_c16c = denom * relay.layout_transform(gamma, src_layout="C", dst_layout="C16c")
denom = relay.layout_transform(denom_c16c, src_layout="C16c", dst_layout="C")
denom_expand1 = relay.expand_dims(denom, axis=1, num_newaxis=2)
denom_expand2 = relay.expand_dims(denom_expand1, axis=0)
denom_nchwc16 = relay.layout_transform(
denom_expand2, src_layout="NCHW", dst_layout="NCHW16c"
)
out = add * denom_nchwc16
beta = relay.var("beta", shape=(16,))
numerator_c16c = (-mean) * denom_c16c + relay.layout_transform(
beta, src_layout="C", dst_layout="C16c"
)
numerator = relay.layout_transform(numerator_c16c, src_layout="C16c", dst_layout="C")
numerator_expand1 = relay.expand_dims(numerator, axis=1, num_newaxis=2)
numerator_expand2 = relay.expand_dims(numerator_expand1, axis=0)
numerator_nchwc16 = relay.layout_transform(
numerator_expand2, src_layout="NCHW", dst_layout="NCHW16c"
)
out = out + numerator_nchwc16
out = relay.layout_transform(out, src_layout="NCHW16c", dst_layout="NCHW")
y = relay.layout_transform(out, src_layout="NCHW", dst_layout="NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
desired_layouts = {"nn.conv2d": ["NCHW", "default"], "nn.batch_norm": ["NHWC", "default"]}
a = run_opt_pass(
a,
[
transform.InferType(),
relay.transform.ConvertLayout(desired_layouts),
transform.SimplifyInference(),
transform.CanonicalizeOps(),
transform.AlterOpLayout(),
],
)
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_concatenate():
"""NCHW, NHWC and corner case concatenate layout transform."""
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def before_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y1 = relay.nn.conv2d(y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1))
ret = relay.concatenate([y, y1], axis=1)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y1 = relay.nn.conv2d(
y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.concatenate([y, y1], axis=1)
ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nchw()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nchw(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def before_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.nn.conv2d(
x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
)
y1 = relay.nn.conv2d(
y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
)
ret = relay.concatenate([y, y1], axis=3)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
weight2 = relay.var("weight2")
y = relay.layout_transform(x, "NHWC", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y1 = relay.nn.conv2d(
y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.concatenate([y, y1], axis=1)
ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nhwc()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nhwc(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nchw_upsamping_op():
"""Test upsamping operators"""
def before():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight", shape=(32, 32, 3, 3))
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.upsampling(y, scale_h=2, scale_w=2)
y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight")
x = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.nn.upsampling(y, scale_h=2, scale_w=2, layout="NCHW16c")
y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout="NCHW16c")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nchw_dyn_upsamping_op():
"""Test upsamping operators""" |
def before():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight", shape=(32, 32, 3, 3))
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.upsampling(y, scale_h=relay.const(2), scale_w=relay.const(2))
y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight")
x = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.nn.upsampling(y, scale_h=relay.const(2), scale_w=relay.const(2), layout="NCHW16c")
y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout="NCHW16c")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
@tvm.testing.parametrize_targets("llvm")
def test_alter_layout_strided_slice(target, dev):
"""Test rewriting strided_slice during alter_iop_layout"""
def before():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight", shape=(32, 32, 3, 3))
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.strided_slice(y, begin=[0, 16], end=[1, 33], strides=[1, 1])
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW4c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight", shape=(32, 32, 3, 3))
weight = relay.layout_transform(weight, "OIHW", "OIHW4i4o")
x = relay.layout_transform(x, "NCHW", "NCHW4c")
y = relay.op.nn.contrib_conv2d_nchwc(
x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW4c"
)
y = relay.strided_slice(y, begin=[0, 4], end=[1, 21], strides=[1, 1])
y = relay.layout_transform(y, "NCHW4c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
b = run_opt_pass(expected(), transform.InferType())
mod_before = tvm.IRModule()
mod_new = tvm.IRModule()
mod_before["main"] = a
mod_new["main"] = b
mod_before = transform.InferType()(mod_before)
mod_new = transform.InferType()(mod_new)
with relay.build_config(opt_level=3):
for kind in ["graph", "debug", "vm"]:
np_data = np.random.uniform(size=(1, 32, 28, 28)).astype("float32")
np_weight = np.random.uniform(size=(32, 32, 3, 3)).astype("float32")
f_before = relay.create_executor(
kind, mod=mod_before, device=dev, target=target
).evaluate()
result_before = f_before(np_data, np_weight)
f_new = relay.create_executor(kind, mod=mod_new, device=dev, target=target).evaluate()
result_new = f_new(np_data, np_weight)
tvm.testing.assert_allclose(
result_before.numpy(), result_new.numpy(), rtol=1e-5, atol=1e-5
)
def test_alter_layout_strided_slice_axes_nhwc():
"""Test rewriting strided_slice with axes during alter_io |
p_layout"""
def before():
x = relay.var("x", shape=(1, 28, 28, 32))
weight = relay.var("weight", shape=(3, 3, 32, 32))
y = relay.nn.conv2d(
x,
weight,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.strided_slice(y, begin=[0, 16], end=[1, 32], strides=[1, 1], axes=[0, 3])
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NHWC4c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 28, 28, 32))
weight = relay.var("weight", shape=(3, 3, 32, 32))
x = relay.layout_transform(x, "NHWC", "NHWC4c")
y = relay.op.nn.conv2d(
x,
weight,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC4c",
kernel_layout="HWIO",
)
y = relay.strided_slice(y, begin=[0, 4], end=[1, 8], strides=[1, 1], axes=[0, 3])
y = relay.layout_transform(y, "NHWC4c", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
mod_before = tvm.IRModule()
mod_new = tvm.IRModule()
mod_before["main"] = a
mod_new["main"] = b
assert tvm.ir.structural_equal(mod_before, mod_new)
def test_alter_layout_depthwise_conv2d():
"""Test depthwise_conv2d operator"""
def before():
x = relay.var("x", shape=(1, 32, 56, 56))
w = relay.var("w", shape=(32, 1, 3, 3))
y = relay.nn.conv2d(x, w, padding=(1, 1), channels=32, kernel_size=(3, 3), groups=32)
y = relay.Function(analysis.free_vars(y), y)
return y
from tvm import topi
def alter_conv2d(attrs, inputs, tinfos, out_type):
with tvm.target.Target("llvm -mcpu=core-avx2"):
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
def expected():
x = relay.var("x", shape=(1, 32, 56, 56))
w = relay.var("w", shape=(32, 1, 3, 3))
x = relay.layout_transform(x, "NCHW", "NCHW8c")
w = relay.layout_transform(w, "OIHW", "OIHW1i8o")
y = relay.nn.contrib_depthwise_conv2d_nchwc(
x,
w,
padding=(1, 1, 1, 1),
channels=32,
kernel_size=(3, 3),
groups=32,
data_layout="NCHW8c",
kernel_layout="OIHW1i8o",
out_layout="NCHW8c",
)
y = relay.layout_transform(y, "NCHW8c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b)
def test_alter_layout_prelu():
"""Test PRelu operator"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight")
alpha = relay.var("alpha", relay.IncompleteType())
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.prelu(y, alpha)
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
w = relay.var("weight")
alpha = relay.var("alpha", relay.IncompleteType())
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, w, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.nn.prelu(y, alpha)
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b)
def test_alter_layout_pad():
"""Check NCHW, NHWC and corner case for pad layout conversion"""
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def before_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1)))
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1), (0, 0)))
ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nchw()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nchw(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def before_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(
x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
)
ret = relay.nn.pad(y, pad_width=((0, 0), (1, 1), (1, 1), (0, 0)))
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NHWC", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1), (0, 0)))
ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nhwc()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nhwc(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
ret = relay.nn.pad(y, pad_width=((0, 0), (1, 1), (1, 1), (1, 1)))
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.layout_transform(y, "NCHW16c", "NCHW")
ret = relay.nn.pad(ret, pad_width=((0, 0), (1, 1), (1, 1), (1, 1)))
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_pool():
"""Check NCHW, NHWC pool layout conversion"""
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def before_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
ret = relay.nn.avg_pool2d(y, pool_size=(1, 1))
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NCHW16c")
ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nchw()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nchw(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def before_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(
x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
)
ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NHWC", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NCHW16c")
ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nhwc()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nhwc(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_sum():
"""Check NCHW, NHWC sum layout conversion"""
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
return relay.nn.conv2d(data, weight, **new_attrs)
def before_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
ret = relay.sum(y, axis=1, keepdims=True)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.sum(y, axis=[1, 4], keepdims=True)
ret = relay.layout_transform(ret, "NCHW1c", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nchw()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nchw(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def before_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
y = relay.nn.conv2d(
x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
)
ret = relay.sum(y, axis=3, keepdims=True)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1")
y = relay.layout_transform(x, "NHWC", "NCHW16c")
y = relay.nn.conv2d(
y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
)
ret = relay.sum(y, axis=[1, 4], keepdims=True)
ret = relay.layout_transform(ret, "NCHW1c", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nhwc()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nhwc(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nhwc_arm():
"""Check that AlterOplayout does not alter NHWC data layout."""
def alter_conv2d(attrs, inputs, tinfos, out_type):
from tvm import topi
with tvm.target.Target("llvm -device=arm_cpu"):
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
def before_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x, weight1, channels=64, kernel_size=(3, 3), data_layout="NHWC", kernel_layout="HWIO"
)
y = relay.nn.relu(y)
y = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NHWC")
y = relay.nn.conv2d(
y, weight2, channels=64, kernel_size=(3, 3), data_layout="NHWC", kernel_layout="HWIO"
)
y = relay.nn.relu(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected_nhwc():
return before_nhwc()
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nhwc()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nhwc(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nhwc_int8_aarch64():
"""Check that AlterOplayout does not alter NHWC data layout."""
from tvm import autotvm
expected_workload_shape = (20, 42, 4, 16)
class Int8Fallback(autotvm.FallbackContext):
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = autotvm.task.space.FallbackConfigEntity()
cfg.is_fallback = False
cfg.cost = 0
self.memory[key] = cfg
return cfg
def update(self, target, workload, cfg):
key = (str(target), workload)
assert workload[2][1] == expected_workload_shape
assert workload[0] == "conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu"
self.memory[key] = cfg
def alter_conv2d(attrs, inputs, tinfos, out_type):
from tvm import topi
with tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
with Int8Fallback():
tmp = topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
return tmp
def before_nhwc_int8():
x = relay.var("x", shape=(1, 56, 56, 73), dtype="int8")
weight = relay.var("weight1", shape=(3, 3, 73, 79), dtype="int8")
y = relay.nn.conv2d(
x,
weight,
channels=79,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected_nhwc_int8():
x = relay.var("x", shape=(1, 56, 56, 73), dtype="int8")
weight = relay.var("weight1", shape=(3, 3, 73, 79), dtype="int8")
tile_rows = 4
tile_cols = 16
weight_transformed = relay.nn.contrib_conv2d_gemm_weight_transform(
weight, tile_rows, tile_cols
)
y = relay.nn.contrib_conv2d_gemm_without_weight_transform(
x,
weight_transformed,
channels=79,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
y = relay.Function(analysis.free_vars(y), y)
return y
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before_nhwc_int8()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected_nhwc_int8(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_op_with_global_var():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
mod = tvm.IRModule()
foo = relay.GlobalVar("foo")
mod[foo] = relay.Function([x, weight], y)
mod = transform.InferType()(mod)
mod["main"] = relay.Function([x, weight], foo(x, weight))
mod = transform.InferType()(mod)
return mod
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.nn.relu(y)
mod = tvm.IRModule()
foo = relay.GlobalVar("foo")
mod[foo] = relay.Function([x, weight], y)
mod = transform.InferType()(mod)
mod["main"] = relay.Function([x, weight], foo(x, weight))
return mod
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = before()
a = transform.AlterOpLayout()(a)
b = transform.InferType()(expected())
assert tvm.ir.structural_equal(a, b, map_free_vars=True), "Actual = \n" + str(a)
def test_alter_op_dense():
def before():
x = relay.var("x", shape=(32, 1, 128))
weight = relay.var("weight", shape=(48, 64))
avg1d = relay.nn.adaptive_avg_pool1d(x, [64])
squeeze = relay.squeeze(avg1d, axis=[1])
y = relay.nn.dense(squeeze, weight)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(32, 1, 128))
weight = relay.var("weight", shape=(48, 64))
target_layout = "NC16n"
weight_transform = relay.layout_transform(weight, "NC", target_layout)
avg1d = relay.nn.adaptive_avg_pool1d(x, [64])
squeeze = relay.squeeze(avg1d, axis=[1])
y = relay.nn.contrib_dense_pack(
squeeze, weight_transform, target_layout, units=None, out_dtype="float32"
)
y = relay.Function(analysis.free_vars(y), y)
return y
target = "llvm -mcpu=core-avx2"
with tvm.target.Target(target):
with TempOpAttr(
"nn.dense", "FTVMAlterOpLayout", topi.x86.dense_alter_op._alter_dense_layout
):
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b)
def test_not_inplace_modify():
def func():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=[2, 2], strides=[2, 2], padding=[0, 0, 0, 0])
y = relay.Function([x, weight], y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW16c"
new_attrs["kernel_layout"] = "OIHW16i"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
before = func()
run_opt_pass(before, [transform.AlterOpLayout()])
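# The pass must not mutate the input expression in place: the max_pool2d in `before`
# should still carry its original NCHW layout.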
assert before.body.attrs.layout == "NCHW"
def test_alter_op_dense_packed_data():
def before():
x = relay.var("x", shape=(1, 32, 8, 8))
weight = relay.var("conv2d_weight", shape=(32, 32, 3, 3))
conv = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
pool = relay.nn.avg_pool2d(conv, pool_size=[8, 8], padding=[0, 0, 0, 0])
squeeze = relay.squeeze(pool, axis=[2, 3])
dense = relay.nn.dense(squeeze, relay.var("dense_weight", shape=(16, 32)))
return relay.Function(analysis.free_vars(dense), dense)
def expected():
x = relay.var("x", shape=(1, 32, 8, 8))
conv_weight = relay.var("conv2d_weight", shape=(32, 32, 3, 3))
dense_weight = relay.var("dense_weight", shape=(16, 32))
conv = relay.nn.contrib_conv2d_nchwc(
relay.layout_transform(x, "NCHW", "NCHW8c"),
relay.layout_transform(conv_weight, "OIHW", "OIHW8i8o"),
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW8c",
kernel_layout="OIHW8i8o",
out_layout="NCHW8c",
)
pool = relay.nn.avg_pool2d(conv, pool_size=[8, 8], padding=[0, 0, 0, 0], layout="NCHW8c")
squeeze = relay.squeeze(pool, axis=[2, 3])
dense = relay.nn.contrib_dense_pack(
relay.layout_transform(squeeze, "NC8c", "NC"),
relay.layout_transform(dense_weight, "NC", "NC16n"),
"NC16n",
out_dtype="float32",
)
return relay.Function(analysis.free_vars(dense), dense)
with tvm.target.Target("llvm -mcpu=core-avx2"):
with TempOpAttr(
"nn.dense", "FTVMAlterOpLayout", topi.x86.dense_alter_op._alter_dense_layout
):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b)
def test_conv2d_strided_slice_packed_to_unpacked():
"""We do not support propagating through packed to unpacked layout"""
x_shape = (1, 1, 1, 1, 4)
w_shape = (9, 1, 3, 3, 4, 4)
def before():
x = relay.var("x", shape=x_shape)
weight = relay.var("weight", shape=w_shape)
y = relay.nn.conv2d(
x,
weight,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW4c",
kernel_layout="OIHW4i4o",
)
y = relay.strided_slice(y, begin=[0, 0], end=[1, -1], strides=[1, 8])
return relay.Function([x, weight], y)
def expected():
x = relay.var("x", shape=x_shape)
weight = relay.var("weight", shape=w_shape)
x_nchw = relay.layout_transform(x, src_layout="NCHW4c", dst_layout="NCHW")
weight_oihw = relay.layout_transform(weight, src_layout="OIHW4i4o", dst_layout="OIHW")
y = relay.nn.conv2d(
x_nchw,
weight_oihw,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.layout_transform(y, src_layout="NCHW", dst_layout="NCHW4c")
y = relay.strided_slice(y, begin=[0, 0], end=[1, -1], strides=[1, 8])
return relay.Function([x, weight], y)
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW"
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b)
def test_conv2d_strided_slice_arbitrary_stride():
"""Test rewriting strided_slice with arbitrary stride"""
def before():
x = relay.var("x", shape=(4, 12, 1, 1))
weight = relay.var("weight", shape=(9, 12, 1, 1))
y = relay.nn.conv2d(x, weight, channels=9, kernel_size=(1, 1), padding=(0, 0))
y = relay.strided_slice(y, begin=[3], end=[6], strides=[3], axes=[1])
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW3c"
return relay.nn.conv2d(data, weight, **new_attrs)
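# There is no expected graph; the test only checks that the pass handles this slice pattern
# without raising.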
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
run_opt_pass(before(), transform.AlterOpLayout())
def test_conv2d_reduce_channels():
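# Build-only test: the conv2d followed by an axis-1 argmin should compile end-to-end at
# opt_level=3, which enables AlterOpLayout.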
x = relay.var("data", sh |
ape=(1, 8, 48, 48))
y = relay.nn.conv2d(
data=x,
weight=relay.var("weight"),
kernel_size=(1, 1),
channels=8,
dilation=1,
strides=(47, 47),
)
z = relay.argmin(y, axis=1)
mod, params = testing.create_workload(z)
with tvm.transform.PassContext(opt_level=3):
relay.build(mod, params=params, target="llvm")
def test_alter_layout_nonscalar_broadcast():
"""Test boradcast operators"""
def before():
x = relay.var("x", shape=(1, 16, 3, 3))
weight = relay.var("weight", shape=(16, 16, 1, 1))
y = relay.nn.conv2d(
x, weight, channels=16, kernel_size=(1, 1), padding=(0, 0), data_layout="NCHW"
)
z = relay.var("z", shape=(1, 3, 3))
y = y + z
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 16, 3, 3))
weight = relay.var("weight", shape=(16, 16, 1, 1))
x = relay.layout_transform(x, src_layout="NCHW", dst_layout="NCHW4c")
weight = relay.layout_transform(weight, src_layout="OIHW", dst_layout="OIHW4i4o")
y = relay.nn.conv2d(
x,
weight,
channels=16,
kernel_size=(1, 1),
padding=(0, 0),
data_layout="NCHW4c",
kernel_layout="OIHW4i4o",
)
z = relay.var("z", shape=(1, 3, 3))
z = relay.expand_dims(z, 0)
z = relay.layout_transform(z, src_layout="NCHW", dst_layout="NCHW1c")
y = y + z
y = relay.layout_transform(y, src_layout="NCHW4c", dst_layout="NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW4c"
new_attrs["kernel_layout"] = "OIHW4i4o"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)
inp = np.random.uniform(size=(1, 16, 3, 3)).astype(np.float32)
weight = np.random.uniform(size=(16, 16, 1, 1)).astype(np.float32)
z = np.random.uniform(size=(1, 3, 3)).astype(np.float32)
mod = tvm.IRModule.from_expr(before())
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
with tvm.transform.PassContext(opt_level=4):
res = relay.build_module.create_executor(
"graph", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
with tvm.transform.PassContext(opt_level=0):
res1 = relay.build_module.create_executor(
"debug", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_alter_layout_blocked_no_broadcast():
"""Test boradcast operators working on already blocked layout"""
def before():
dtype = "float32"
input_shape = (1, 8, 16, 16, 4)
filter_shape = (1, 8, 4, 4, 4, 4)
bias_shape = (1, 1, 1, 1, 4)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
C = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW4c",
kernel_layout="OIHW4i4o",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=4,
kernel_size=(4, 4),
)
bias = relay.op.add(conv, C)
bias = relay.Function(analysis.free_vars(bias), bias)
return bias
def expected():
return before()
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs[" |
data_layout"] = "NCHW4c"
new_attrs["kernel_layout"] = "OIHW4i4o"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)
inp = np.random.uniform(size=(1, 8, 16, 16, 4)).astype(np.float32)
weight = np.random.uniform(size=(1, 8, 4, 4, 4, 4)).astype(np.float32)
z = np.random.uniform(size=(1, 1, 1, 1, 4)).astype(np.float32)
mod = tvm.IRModule.from_expr(before())
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
with tvm.transform.PassContext(opt_level=4):
res = relay.build_module.create_executor(
"graph", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
with tvm.transform.PassContext(opt_level=0):
res1 = relay.build_module.create_executor(
"debug", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_alter_layout_blocked_broadcast():
"""Test boradcast operators working on already blocked layout"""
def before():
dtype = "float32"
input_shape = (1, 8, 16, 16, 4)
filter_shape = (1, 8, 4, 4, 4, 4)
bias_shape = (1, 1, 1, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
C = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW4c",
kernel_layout="OIHW4i4o",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=4,
kernel_size=(4, 4),
)
bias = relay.op.add(conv, C)
bias = relay.Function(analysis.free_vars(bias), bias)
return bias
def expected():
return before()
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW4c"
new_attrs["kernel_layout"] = "OIHW4i4o"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)
inp = np.random.uniform(size=(1, 8, 16, 16, 4)).astype(np.float32)
weight = np.random.uniform(size=(1, 8, 4, 4, 4, 4)).astype(np.float32)
z = np.random.uniform(size=(1, 1, 1, 1, 1)).astype(np.float32)
mod = tvm.IRModule.from_expr(before())
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
with tvm.transform.PassContext(opt_level=4):
res = relay.build_module.create_executor(
"graph", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
with tvm.transform.PassContext(opt_level=0):
res1 = relay.build_module.create_executor(
"debug", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_alter_layout_re_blocking_broadcast():
"""Test of re-blocking shapes with boradcast operators"""
def before():
dtype = "float32"
input_shape = (1, 8, 16, 16, 4)
filter_shape = (1, 8, 4, 4, 4, 4)
bias_shape = (1, 1, 1, 1, 4)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
C = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW4c",
kernel_layout="OIHW4i4o",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=4,
kernel_size=(4, 4),
)
bias = relay.op.add(conv, C)
bias = relay.Function(analysis.free_vars(bias), bias)
return bias
def expected():
dtype = "float32"
input_shape = (1, 8, 16, 16, 4)
filter_shape = (1, 8, 4, 4, 4, 4)
bias_shape = (1, 1, 1, 1, 4)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
C = relay.var("bias", shape=bias_shape, dtype=dtype)
A = relay.layout_transform(A, src_layout="NCHW4c", dst_layout="NCHW2c")
B = relay.layout_transform(B, src_layout="OIHW4i4o", dst_layout="OIHW2i2o")
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW2c",
kernel_layout="OIHW2i2o",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=4,
kernel_size=(4, 4),
)
C = relay.layout_transform(C, src_layout="NCHW4c", dst_layout="NCHW2c")
bias = relay.op.add(conv, C)
bias = relay.layout_transform(bias, src_layout="NCHW2c", dst_layout="NCHW4c")
bias = relay.Function(analysis.free_vars(bias), bias)
return bias
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW2c"
new_attrs["kernel_layout"] = "OIHW2i2o"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)
inp = np.random.uniform(size=(1, 8, 16, 16, 4)).astype(np.float32)
weight = np.random.uniform(size=(1, 8, 4, 4, 4, 4)).astype(np.float32)
z = np.random.uniform(size=(1, 1, 1, 1, 4)).astype(np.float32)
mod = tvm.IRModule.from_expr(before())
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
with tvm.transform.PassContext(opt_level=4):
res = relay.build_module.create_executor(
"graph", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
with tvm.transform.PassContext(opt_level=0):
res1 = relay.build_module.create_executor(
"debug", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
np.testing.assert_allclose(res.numpy(), res1.numpy(), rtol=1e-5, atol=1e-5)
def test_broadcast_non_adaptable():
"""NCHW4c + [x, x, 4] and NCHW4c is being altered to NCHW"""
def before():
x = relay.var("x", shape=(1, 4, 3, 3, 4))
weight = relay.var("weight", shape=(4, 4, 1, 1, 4, 4))
y = relay.nn.conv2d(
x,
weight,
channels=16,
kernel_size=(1, 1),
padding=(0, 0),
data_layout="NCHW4c",
kernel_layout="OIHW4i4o",
)
z = relay.var("z", shape=(3, 3, 4))
y = y + z
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 4, 3, 3, 4))
weight = relay.var("weight", shape=(4, 4, 1, 1, 4, 4))
x = relay.layout_transform(x, src_layout="NCHW4c", dst_layout="NCHW")
weight = relay.layout_transform(weight, src_layout="OIHW4i4o", dst_layout="OIHW")
y = relay.nn.conv2d(
x,
weight,
channels=16,
kernel_size=(1, 1),
padding=(0, 0),
data_layout="NCHW",
kernel_layout="OIHW",
)
z = relay.var("z", shape=(3, 3, 4))
y = relay.layout_transform(y, src_layout="NCHW", dst_layout="NCHW4c")
y = y + z
y = relay.Function(analysis.free_vars(y), y)
return y
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW"
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
a = run_opt_pass(before(), transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)
inp = np.random.uniform(size=(1, 4, 3, 3, 4)).astype(np.float32)
weight = np.random.uniform(size=(4, 4, 1, 1, 4, 4)).astype(np.float32)
z = np.random.uniform(size=(3, 3, 4)).astype(np.float32)
mod = tvm.IRModule.from_expr(before())
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
with tvm.transform.PassContext(opt_level=4):
res = relay.build_module.create_executor(
"graph", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
with tvm.transform.PassContext(opt_level=0):
res1 = relay.build_module.create_executor(
"debug", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight, z)
np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_broadcast_respect_input_layouts():
def before():
x = relay.var("x", shape=(1, 16, 1, 1))
w = relay.var("w", shape=(16, 16, 1, 1))
x = relay.nn.conv2d(
x,
w,
kernel_size=(1, 1),
padding=(0, 0),
channels=16,
)
y1 = relay.min(x, axis=[2])
y2 = relay.min(x, axis=[3])
z = y1 + y2
z = relay.Function(analysis.free_vars(z), z)
return z
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs["data_layout"] = "NCHW4c"
new_attrs["kernel_layout"] = "OIHW4i4o"
return relay.nn.conv2d(data, weight, **new_attrs)
inp = np.random.uniform(size=(1, 16, 1, 1)).astype(np.float32)
weight = np.random.uniform(size=(16, 16, 1, 1)).astype(np.float32)
mod = tvm.IRModule.from_expr(before())
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
with tvm.transform.PassContext(opt_level=4):
res = relay.build_module.create_executor(
"graph", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight)
with tvm.transform.PassContext(opt_level=0):
res1 = relay.build_module.create_executor(
"debug", mod, target="llvm", device=tvm.cpu()
).evaluate()(inp, weight)
np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_axis_semantic_change():
x = relay.var("x", shape=(1, 1, 24, 48))
w1 = relay.const(np.random.uniform(size=(1, 1, 1, 1)))
w2 = relay.const(np.random.uniform(size=(1, 1, 1, 1)))
y = relay.nn.conv2d(x, w1, kernel_size=(1, 1), padding=(0, 0), channels=1)
y = relay.transpose(y, (0, 1, 3, 2))
z = relay.nn.conv2d(y, w2, kernel_size=(1, 1), padding=(0, 0), channels=1)
func = relay.Function([x], z)
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3):
relay.build(mod, target="llvm")
def test_alter_with_subfunc():
v1 = relay.var("v", shape=[1, 256, 10, 10], dtype="float32")
v2 = relay.image.resize2d(v1, size=[16, 16], roi=[0.0, 0.0, 0.0, 0.0], rounding_method="")
sub_func = relay.Function([v1], v2)
x1 = relay.var("x", shape=[1, 256, 10, 10], dtype="float32")
x2 = sub_func(x1)
x3 = relay.image.resize2d(x2, size=[8, 8], roi=[0.0, 0.0, 0.0, 0.0], rounding_method="")
func = relay.Function([x1], x3)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(relay.transform.AlterOpLayout()(mod), mod)
def test_alter_with_reduce():
x = relay.var("x", shape=(1, 1, 1, 1))
y = relay.image.resize2d(x, (2, 4))
z = relay.mean(y, axis=0)
a = relay.image.resize1d(z, (1,))
func = relay.Function((x,), a)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
with tvm.transform.PassContext(opt_level=4):
relay.build(mod, target="llvm")
if __name__ == "__main__":
pytest.main([__file__])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotating spans."""
import tvm
import tvm.relay as relay
from tvm.relay import testing
import tvm.testing
def test_annotate_spans_compatibility():
data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
simple_net = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
)
simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0]
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
module, params = testing.create_workload(simple_net)
# Apply some simple passes to legalize the IR.
with tvm.transform.PassContext(opt_level=0):
module, params = relay.optimize(
module, target=tvm.testing.enabled_targets()[0][0], params=params
)
seq = tvm.transform.Sequential([relay.transform.AnnotateSpans(), relay.transform.DefuseOps()])
with tvm.transform.PassContext(opt_level=3):
module = seq(module)
if __name__ == "__main__":
tvm.testing.main()
"""Unit tests for annotating external targets."""
import os
import sys
import numpy as np
import pytest
import tvm
import tvm.relay.testing
import tvm.relay.transform as transform
from tvm import relay
from tvm import runtime
from tvm.contrib import utils
def check_result(
mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu(), params=None
):
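# Compile the module with both the Relay VM and the graph executor and compare each output
# against the reference `result`.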
if sys.platform == "win32":
print("Skip test on Windows for now")
return
def update_lib(lib):
test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
source_dir = os.path.join(test_dir, "..", "..", "..")
contrib_path = os.path.join(source_dir, "src", "runtime", "contrib")
kwargs = {}
kwargs["options"] = ["-O2", "-std=c++17", "-I" + contrib_path]
tmp_path = utils.tempdir()
lib_name = "lib.so"
lib_path = tmp_path.relpath(lib_name)
lib.export_library(lib_path, fcompile=False, **kwargs)
lib = runtime.load_module(lib_path)
return lib
def check_vm_result():
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
exe = relay.vm.compile(mod, target=target, params=params)
code, lib = exe.save()
lib = update_lib(lib)
exe = runtime.vm.Executable.load_exec(code, lib)
vm = runtime.vm.VirtualMachine(exe, device)
out = vm.run(**map_inputs)
tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_graph_executor_result():
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
json, lib, param = relay.build(mod, target=target, params=params)
lib = update_lib(lib)
rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, device=device)
out = rt_mod.get_output(0, out)
tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
check_vm_result()
check_graph_executor_result()
def test_extern_dnnl():
def annotated(dtype, ishape, w1shape):
data = relay.var("data", shape |
=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data, weight1], out)
mod = tvm.IRModule.from_expr(f)
return mod
def expected(dtype, ishape, w1shape):
data = relay.var("data", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
begin0 = relay.annotation.compiler_begin(data, "dnnl")
begin1 = relay.annotation.compiler_begin(weight1, "dnnl")
depthwise_conv2d_1 = relay.nn.conv2d(
begin0, begin1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
end0 = relay.annotation.compiler_end(depthwise_conv2d_1, "dnnl")
end1 = relay.annotation.compiler_end(depthwise_conv2d_1, "dnnl")
begin2 = relay.annotation.compiler_begin(end1, "dnnl")
begin3 = relay.annotation.compiler_begin(end0, "dnnl")
begin4 = relay.annotation.compiler_begin(weight1, "dnnl")
depthwise_conv2d_2 = relay.nn.conv2d(
begin3, begin4, kernel_size=(3, 3), padding=(1, 1), groups=32
)
end2 = relay.annotation.compiler_end(depthwise_conv2d_2, "dnnl")
begin5 = relay.annotation.compiler_begin(end2, "dnnl")
out = relay.add(begin2, begin5)
end3 = relay.annotation.compiler_end(out, "dnnl")
f = relay.Function([data, weight1], end3)
mod = tvm.IRModule.from_expr(f)
return mod
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
def test_annotate():
mod = annotated(dtype, ishape, w1shape)
mod = transform.AnnotateTarget("dnnl")(mod)
mod = relay.transform.InferType()(mod)
ref_mod = expected(dtype, ishape, w1shape)
ref_mod = relay.transform.InferType()(ref_mod)
tvm.ir.assert_structural_equal(mod, ref_mod)
def test_run():
if not tvm.get_global_func("relay.ext.dnnl", True):
print("skip because DNNL codegen is not available")
return
ref_mod = annotated(dtype, ishape, w1shape)
mod = annotated(dtype, ishape, w1shape)
mod = transform.PartitionGraph()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w1_data
)
check_result(
mod, {"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
)
test_annotate()
test_run()
@pytest.mark.skip(reason="fix constant node before opening this case")
def test_extern_dnnl_mobilenet():
if not tvm.get_global_func("relay.ext.dnnl", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 3, 224, 224)
mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params)
mod = transform.AnnotateTarget("dnnl")(mod)
mod = transform.PartitionGraph()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)).evaluate()(
i_data, **params
)
check_result(mod, {"data": i_data}, (1, 1000), ref_res.numpy(), tol=1e-5, params=params)
def test_multiple_ends():
@tvm.ir.register_op_attr("nn.relu", "target.test")
def relu(expr):
return True
def before():
x = relay.var("x", shape=(10, 10))
r = relay.nn.relu(x)
a_1 = relay.abs(r)
a_2 = relay.abs(r)
out = relay.add(a_1, a_2)
f = relay.Function([x], out)
mod = tvm.IRModule.from_expr(f)
return mod
def after():
x = relay.var("x", shape=(10, 10))
cb_1 = relay.annotation.compiler_begin(x, "test")
r = relay.nn.relu(cb_1)
ce_1 = relay.annotation.compiler_end(r, "test")
ce_2 = relay.annotation.compiler_end(r, "test")
cb_2 = relay.annotation.compiler_begin(ce_1, "default")
cb_3 = relay.annotation.compiler_begin(ce_2, "default")
a_1 = relay.abs(cb_2)
a_2 = relay.abs(cb_3)
ce_3 = relay.annotation.compiler_end(a_1, "default")
ce_4 = relay.annotation.compiler_end(a_2, "default")
cb_4 = relay.annotation.compiler_begin(ce_3, "default")
cb_5 = relay.annotation.compiler_begin(ce_4, "default")
out = relay.add(cb_4, cb_5)
ce_6 = relay.annotation.compiler_end(out, "default")
f = relay.Function([x], ce_6)
mod = tvm.IRModule.from_expr(f)
return mod
for annotate_non_call_ops in [False, True]:
result = transform.AnnotateTarget("test", annotate_non_call_ops)(before())
expected = transform.InferType()(after())
assert tvm.ir.structural_equal(expected, result)
def test_type_propagation():
target = "test_type_propagation"
@tvm.ir.register_op_attr("nn.relu", "target." + target)
def relu(expr):
return expr.args[0].checked_type.dtype == "float32"
def before():
x = relay.var("x", shape=(10, 10))
r = relay.nn.relu(x)
out = relay.nn.relu(r)
f = relay.Function([x], out)
mod = tvm.IRModule.from_expr(f)
return mod
for annotate_non_call_ops in [False, True]:
assert transform.AnnotateTarget(target, annotate_non_call_ops)(before())
def test_ref_create_read_write():
target = "relu"
@tvm.ir.register_op_attr("nn.relu", "target." + target)
def annotate(expr):
return True
def before():
ref = relay.expr.RefCreate(relay.const(1.0))
r = relay.expr.RefWrite(ref, relay.nn.relu(relay.expr.RefRead(ref)))
return tvm.IRModule.from_expr(r)
def after(annotate_non_call_ops):
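# When annotate_non_call_ops is set, the ref create/read/write nodes are also wrapped in
# "default" begin/end annotations; only the relu call is assigned to the target region.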
co = relay.const(1.0)
if annotate_non_call_ops:
co = relay.annotation.compiler_begin(co, "default")
ref = relay.expr.RefCreate(co)
ref1 = ref
if annotate_non_call_ops:
ref = relay.annotation.compiler_end(ref, "default")
ref = relay.annotation.compiler_begin(ref, "default")
ref1 = relay.annotation.compiler_end(ref1, "default")
ref1 = relay.annotation.compiler_begin(ref1, "default")
read = relay.expr.RefRead(ref1)
if annotate_non_call_ops:
read = relay.annotation.compiler_end(read, "default")
beg = relay.annotation.compiler_begin(read, target)
relu = relay.nn.relu(beg)
end = relay.annotation.compiler_end(relu, target)
if annotate_non_call_ops:
end = relay.annotation.compiler_begin(end, "default")
r = relay.expr.RefWrite(ref, end)
if annotate_non_call_ops:
r = relay.annotation.compiler_end(r, "default")
return tvm.IRModule.from_expr(r)
for annotate_non_call_ops in [True, False, True]:
result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
expected = transform.InferType()(after(annotate_non_call_ops))
assert tvm.ir.structural_equal(expected, result)
def test_tuple():
target = "test_tuple"
@tvm.ir.register_op_attr("nn.relu", "target." + target)
def relu(expr):
return True
@tvm.ir.register_op_attr("concatenate", "target." + target)
def concatenate(expr):
return True
"""Test that TupleNode is included in annotation when surrounded by supported nodes."""
def before():
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(10, 5))
a_1 = relay.nn.relu(x)
a_2 = relay.nn.relu(y)
out = relay.concatenate((a_1, a_2), axis=1)
f = relay.Function([x, y], out)
mod = tvm.IRModule.from_expr(f)
return mod
def after(annotate_non_call_ops):
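# With annotate_non_call_ops the Tuple itself is wrapped in begin/end annotations for the
# target; otherwise it is built directly from the compiler_end values of the relu calls.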
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(10, 5))
cb_1 = relay.annotation.compiler_begin(x, target)
cb_2 = relay.annotation.compiler_begin(y, target)
a_1 = relay.nn.relu(cb_1)
a_2 = relay.nn.relu(cb_2)
ce_1 = relay.annotation.compiler_end(a_1, target)
ce_2 = relay.annotation.compiler_end(a_2, target)
if annotate_non_call_ops:
cb_3 = relay.annotation.compiler_begin(ce_1, target)
cb_4 = relay.annotation.compiler_begin(ce_2, target)
tup = relay.Tuple([cb_3, cb_4])
ce_3 = relay.annotation.compiler_end(tup, target)
else:
ce_3 = relay.Tuple([ce_1, ce_2])
cb_3 = relay.annotation.compiler_begin(ce_3, target)
out = relay.op._make.concatenate(cb_3, 1)
ce_4 = relay.annotation.compiler_end(out, target)
f = relay.Function([x, y], ce_4)
mod = tvm.IRModule.from_expr(f)
return mod
for annotate_non_call_ops in [False, True]:
result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
expected = transform.InferType()(after(annotate_non_call_ops))
assert tvm.ir.structural_equal(expected, result)
def test_composite_function():
def before():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
in_1 = relay.var("in_1", shape=(10, 10))
in_2 = relay.var("in_2", shape=(10, 10))
add_node = relay.add(in_1, in_2)
relu_node = relay.nn.relu(add_node)
add_relu = relay.Function([in_1, in_2], relu_node)
add_relu = add_relu.with_attr("Composite", "test.add_relu")
r = relay.Call(add_relu, [a, b])
f = relay.Function([a, b], r)
mod = tvm.IRModule.from_expr(f)
return mod
def after():