import numpy as np
import tvm
from tvm import relay, te
from tvm.contrib import graph_executor
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
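# Tests for qnn.conv2d_transpose: each case compares the fused QNN op against a
# reference graph built from cast -> subtract-zero-point -> nn.conv2d_transpose.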
def get_ref_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
groups,
channels=None,
):
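    # Reference lowering: widen to int32, subtract the zero points, then run a
    # plain nn.conv2d_transpose; the QNN op must match this bit-for-bit.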
casted_data = relay.op.cast(data, "int32")
casted_kernel = relay.op.cast(kernel, "int32")
shifted_data = relay.op.subtract(casted_data, relay.const(input_zero_point, "int32"))
shifted_kernel = relay.op.subtract(casted_kernel, relay.const(kernel_zero_point, "int32"))
func = relay.op.nn.conv2d_transpose(
shifted_data,
shifted_kernel,
padding=padding,
strides=strides,
dilation=dilation,
groups=groups,
channels=channels,
kernel_size=kernel_size,
out_dtype=out_dtype,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
func = relay.Function(relay.analysis.free_vars(func), func)
return func
def get_qnn_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
channels,
groups,
):
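    # Same workload expressed with the fused qnn.conv2d_transpose op; scales and
    # zero points are passed as Relay constants.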
func = relay.qnn.op.conv2d_transpose(
data,
kernel,
input_zero_point=relay.const(input_zero_point, "int32"),
kernel_zero_point=relay.const(kernel_zero_point, "int32"),
input_scale=relay.const(input_scale, "float32"),
kernel_scale=relay.const(kernel_scale, "float32"),
kernel_size=kernel_size,
strides=strides,
dilation=dilation,
padding=padding,
out_dtype=out_dtype,
groups=groups,
channels=channels,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
mod = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(mod)
return mod
def get_funcs(
data_shape,
data_dtype,
kernel_shape,
    kernel_dtype,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
groups=1,
channels=None,
):
data = relay.var("data", shape=data_shape, dtype=data_dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
ref_func = get_ref_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
groups,
channels,
)
ref_func = run_infer_type(ref_func)
ref_func = tvm.IRModule.from_expr(ref_func)
qnn_func = get_qnn_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
channels,
groups,
)
return (ref_func, qnn_func)
def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype):
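    # Compile both modules, feed identical random inputs, and require bit-exact
    # agreement between the QNN op and the reference graph.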
def get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype):
low = -128
high = 127
if data_dtype == "uint8":
low = 0
high = 255
golden_data = np.random.randint(low=low, high=high, size=data_shape).astype(data_dtype)
low = -128
high = 127
if kernel_dtype == "uint8":
low = 0
high = 255
golden_weight = np.random.randint(low=low, high=high, size=kernel_shape).astype(
kernel_dtype
)
return (golden_data, golden_weight)
def get_output(func, golden_inputs):
with tvm.transform.PassContext(opt_level=2):
golden_data, golden_weight = golden_inputs
params = {"kernel": golden_weight}
            libs = relay.build(func, "llvm", params=params)
mod = graph_executor.create(libs.graph_json, libs.lib, device=tvm.cpu(0))
mod.set_input("data", golden_data)
mod.set_input(**libs.params)
mod.run()
res = mod.get_output(0).numpy()
return res
golden_inputs = get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype)
golden_output = get_output(ref_func, golden_inputs)
qnn_output = get_output(qnn_func, golden_inputs)
np.testing.assert_equal(qnn_output, golden_output)
def test_no_zero_point():
data_shape = (2, 1, 2, 4)
data_dtype = "uint8"
kernel_shape = (1, 3, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 1, 2, 4)
data_dtype = "int8"
kernel_shape = (1, 3, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_kernel_zero_point():
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "uin |
t8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=1,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 1, 2, 4)
data_dtype = "int8"
kernel_shape = (1, 3, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=5,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_input_zero_point():
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 2, 4)
data_dtype = "int8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "int8"
    ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_both_zero_point():
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 2, 4)
data_dtype = "int8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_different_dtype():
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (4, 3, 2, 2)
    kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
channels=kernel_shape[1],
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 2, 4)
data_dtype = "int8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
channels=kernel_shape[1],
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_layout():
data_shape = (2, 2, 4, 4)
data_dtype = "uint8"
kernel_shape = (2, 2, 3, 4)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 2, 4, 3)
    data_dtype = "uint8"
kernel_shape = (2, 2, 1, 3)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_padding():
data_shape = (1, 4, 2, 2)
data_dtype = "uint8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=5,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 2, 4, 4)
data_dtype = "uint8"
kernel_shape = (2, 2, 3, 4)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 8, 6, 4)
data_dtype = "uint |
8"
kernel_shape = (2, 2, 3, 4)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(1, 1, 2, 2),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_const_folding():
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (4, 3, 2, 2)
kernel_dtype = "uint8"
golden_weight = np.random.randint(low=0, high=255, size=kernel_shape).astype(kernel_dtype)
data = relay.var("data", shape=data_shape, dtype=data_dtype)
kernel = relay.const(golden_weight)
qnn_func = get_qnn_func(
data,
kernel,
input_zero_point=8,
kernel_zero_point=3,
kernel_size=(2, 2),
input_scale=1.0,
kernel_scale=1.0,
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
channels=kernel_shape[1],
groups=1,
)
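    # Canonicalization can introduce a reshape of the kernel zero point; with a
    # constant kernel, FoldConstant should eliminate it from the final module.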
folded_mod = transform.FoldConstant()(qnn_func)
folded_func = folded_mod["main"]
assert "reshape" not in folded_func.astext()
def test_broadcast_layout():
data_shape = (1, 229, 229, 3)
data_dtype = "uint8"
kernel_shape = (7, 7, 64, 3)
kernel_dtype = "int8"
_, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(7, 7),
padding=(1, 1),
strides=(1, 1),
            dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
)
func = qnn_func["main"].body
bias = relay.var("bias", shape=(64,), dtype="int32")
bias2 = relay.var("bias2", shape=(1, 233, 233, 64), dtype="int32")
func = relay.add(func, bias2)
func = relay.add(bias2, func)
func = relay.add(bias, func)
func = relay.add(func, bias)
func = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3):
libs = relay.build(mod, "llvm -mcpu=skylake-avx512")
def test_non_scalar_input_scale_zp():
data_shape = (2, 1, 2, 4)
data_dtype = "uint8"
kernel_shape = (1, 3, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=[0],
kernel_zero_point=0,
input_scale=[1.0],
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_per_channel_kernel_scale():
data_shape = (2, 1, 2, 4)
data_dtype = "uint8"
kernel_shape = (1, 3, 2, 2)
kernel_dtype = "uint8"
data = relay.var("data", shape=data_shape, dtype=data_dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
kernel_scales = [2, 2, 2]
kernel_scales = relay.const(np.array(kernel_scales).astype("float32"))
func = relay.qnn.op.conv2d_transpose(
data,
kernel,
input_zero_point=relay.const(0, "int32"),
kernel_zero_point=relay.const(0, "int32"),
input_scale=relay.const(2.0, "float32"),
kernel_scale=kernel_scales,
kernel_size=(2, 2),
channels=kernel_shape[0],
padding=(0, 0), |
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="IOHW",
out_dtype="int32",
)
mod = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(mod)
if __name__ == "__main__":
test_no_zero_point()
test_input_zero_point()
test_kernel_zero_point()
test_both_zero_point()
test_different_dtype()
test_layout()
test_padding()
test_const_folding()
test_broadcast_layout()
    test_per_channel_kernel_scale()
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.contrib import graph_executor
from tvm.relay.testing.temp_op_attr import TempOpAttr
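# Returning None from the FTVMQnnLegalize hook leaves the op untouched, so these
# tests exercise the default QNN canonicalization path rather than any
# target-specific legalization.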
def legalize_qnn_conv2d(attrs, inputs, types):
return None
def get_ref_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
groups,
channels=None,
):
if isinstance(input_zero_point, (int, float)):
input_zero_point = relay.const(input_zero_point, "int32")
if isinstance(kernel_zero_point, (int, float)):
kernel_zero_point = relay.const(kernel_zero_point, "int32")
else:
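        # Per-channel zero point: reshape the vector so it broadcasts along the
        # kernel's output-channel axis for the given layout.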
if kernel_layout == "OIHW":
kernel_zero_point = relay.reshape(kernel_zero_point, [-1, 1, 1, 1])
elif kernel_layout == "HWOI":
kernel_zero_point = relay.reshape(kernel_zero_point, [1, 1, -1, 1])
casted_data = relay.op.cast(data, "int32")
casted_kernel = relay.op.cast(kernel, "int32")
shifted_data = relay.op.subtract(casted_data, input_zero_point)
shifted_kernel = relay.op.subtract(casted_kernel, kernel_zero_point)
func = relay.op.nn.conv2d(
shifted_data,
shifted_kernel,
padding=padding,
strides=strides,
dilation=dilation,
groups=groups,
channels=channels,
kernel_size=kernel_size,
out_dtype=out_dtype,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
func = relay.Function(relay.analysis.free_vars(func), func)
return func
def get_qnn_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
channels,
groups,
):
if isinstance(input_zero_point, (int, float)):
input_zero_point = relay.const(input_zero_point, "int32")
if isinstance(kernel_zero_point, (int, float)):
kernel_zero_point = relay.const(kernel_zero_point, "int32")
func = relay.qnn.op.conv2d(
        data,
kernel,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=relay.const(input_scale, "float32"),
kernel_scale=relay.const(kernel_scale, "float32"),
kernel_size=kernel_size,
strides=strides,
dilation=dilation,
padding=padding,
out_dtype=out_dtype,
groups=groups,
channels=channels,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
mod = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(mod)
return mod
def get_funcs(
data_shape,
data_dtype,
kernel_shape,
kernel_dtype,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
groups=1,
channels=None,
):
data = relay.var("data", shape=data_shape, dtype=data_dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
ref_func = get_ref_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
groups,
channels,
)
ref_func = run_infer_type(ref_func)
ref_func = tvm.IRModule.from_expr(ref_func)
qnn_func = get_qnn_func(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
padding,
strides,
dilation,
data_layout,
kernel_layout,
out_dtype,
channels,
groups,
)
return (ref_func, qnn_func)
def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype):
def get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype):
low = -128
        high = 127
if data_dtype == "uint8":
low = 0
high = 255
golden_data = np.random.randint(low=low, high=high, size=data_shape).astype(data_dtype)
low = -128
high = 127
if kernel_dtype == "uint8":
low = 0
high = 255
golden_weight = np.random.randint(low=low, high=high, size=kernel_shape).astype(
kernel_dtype
)
return (golden_data, golden_weight)
def get_output(func, golden_inputs):
with tvm.transform.PassContext(opt_level=2):
golden_data, golden_weight = golden_inputs
params = {"kernel": golden_weight}
graph, lib, params = relay.build(func, "llvm", params=params)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input("data", golden_data)
mod.set_input(**params)
mod.run()
res = mod.get_output(0).numpy()
return res
golden_inputs = get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype)
golden_output = get_output(ref_func, golden_inputs)
qnn_output = get_output(qnn_func, golden_inputs)
np.testing.assert_equal(qnn_output, golden_output)
def test_no_zero_point():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 1, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 1, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 1, 2, 4)
data_dtype = "int8"
kernel_shape = (3, 1, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_kernel_zero_point():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=1,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 1, 2, 4)
data_dtype = "int8"
kernel_shape = (3, 1, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
            kernel_zero_point=5,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_input_zero_point():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 2, 4)
data_dtype = "int8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_both_zero_point():
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 2, 4)
data_dtype = "int8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_dynamic_zero_point():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
input_zero_point = relay.op.multiply(
relay.const(2, dtype="int32"), relay.const(2, dtype="int32")
)
        kernel_zero_point = relay.const(np.random.randint(10, size=[3]), "int32")
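        # Non-constant input zero point (2 * 2) plus a per-output-channel kernel
        # zero point, exercising non-scalar quantization parameters.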
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 2, 4)
data_dtype = "int8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "int8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_layout():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 2, 4, 4)
data_dtype = "uint8"
kernel_shape = (2, 2, 4, 3)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
            padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 2, 4, 3)
data_dtype = "uint8"
kernel_shape = (2, 2, 3, 1)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
groups=3,
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_padding():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (1, 4, 2, 2)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=5,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 2, 4, 4)
data_dtype = "uint8"
kernel_shape = (2, 2, 4, 3)
kernel_dtype = " |
uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 2, 4, 4)
data_dtype = "uint8"
kernel_shape = (2, 2, 4, 3)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(1, 1, 2, 2),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_dilation():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 4, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
            strides=(1, 1),
dilation=(2, 2),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 4, 4, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=0,
kernel_zero_point=0,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(2, 2),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_const_folding():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 2, 2)
kernel_dtype = "uint8"
golden_weight = np.random.randint(low=0, high=255, size=kernel_shape).astype(kernel_dtype)
data = relay.var("data", shape=data_shape, dtype=data_dtype)
kernel = relay.const(golden_weight)
qnn_func = get_qnn_func(
data,
kernel,
input_zero_point=8,
kernel_zero_point=3,
kernel_size=(2, 2),
input_scale=1.0,
kernel_scale=1.0,
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
channels=kernel_shape[0],
groups=1,
)
folded_mod = transform.FoldConstant()(qnn_func)
folded_func = folded_mod["main"]
assert "reshape" not in folded_func.as |
text()
def test_kernel_size_1x1():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 1, 1)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
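        # The 1x1-kernel special case should not lower through the
        # avg_pool2d-based zero-point term computation.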
assert "avg_pool2d" not in qnn_func.astext()
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_kernel_size_1x1_strides_2():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 4, 1, 1)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=5,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(1, 1),
padding=(0, 0),
strides=(2, 2),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
assert "avg_pool2d" not in qnn_func.astext()
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_tflite_large_irregular():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (1, 1024, 1, 1)
data_dtype = "uint8" |
kernel_shape = (1001, 1024, 1, 1)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=127,
kernel_zero_point=127,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
golden_data = np.full(data_shape, 127).astype("uint8")
golden_weight = np.full(kernel_shape, 127).astype("uint8")
with tvm.transform.PassContext(opt_level=2):
params = {"kernel": golden_weight}
graph, lib, params = relay.build(qnn_func, "llvm", params=params)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input("data", golden_data)
mod.set_input(**params)
mod.run()
qnn_output = mod.get_output(0).numpy()
golden_output = np.full((1, 1001, 1, 1), 0).astype("uint8")
np.testing.assert_equal(qnn_output, golden_output)
def test_tflite_output_multiplier_greater_than_one():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 1, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 1, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_scale=1.0,
kernel_scale=1.0,
input_zero_point=128,
kernel_zero_point=128,
kernel_size=(2, 2),
padding=(0, 0),
strides=(2, 2),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW", |
out_dtype="int32",
)
golden_data = 128 + np.array((1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 3, 4, 1, 2, 3, 4)).reshape(
data_shape
).astype("uint8")
golden_weight = 128 + np.array((1, 2, 3, 4, -1, 1, -1, 1, -1, -1, 1, 1)).reshape(
kernel_shape
)
golden_weight = golden_weight.astype("uint8")
with tvm.transform.PassContext(opt_level=2):
params = {"kernel": golden_weight}
graph, lib, params = relay.build(qnn_func, "llvm", params=params)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input("data", golden_data)
mod.set_input(**params)
mod.run()
qnn_output = mod.get_output(0).numpy()
golden_output = np.array((17, 17, 0, 0, 2, 2, 16, 36, 2, 2, 0, 0)).reshape(2, 3, 1, 2)
np.testing.assert_equal(qnn_output, golden_output)
def test_tflite_anisotropic_strides():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (1, 1, 3, 6)
data_dtype = "uint8"
kernel_shape = (1, 1, 2, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=127,
kernel_zero_point=127,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(2, 2),
padding=(0, 0),
strides=(1, 3),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
golden_data = np.array(
(
133,
131,
129,
125,
123,
121,
135,
133,
131,
123,
121,
119,
                137,
135,
133,
121,
119,
117,
)
).reshape(data_shape)
golden_data = golden_data.astype("uint8")
golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape)
golden_weight = golden_weight.astype("uint8")
with tvm.transform.PassContext(opt_level=2):
params = {"kernel": golden_weight}
graph, lib, params = relay.build(qnn_func, "llvm", params=params)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input("data", golden_data)
mod.set_input(**params)
mod.run()
qnn_output = mod.get_output(0).numpy()
golden_output = np.array((124, -92, 164, -132)).reshape(1, 1, 2, 2)
np.testing.assert_equal(qnn_output, golden_output)
def test_broadcast_layout():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (1, 229, 229, 3)
data_dtype = "uint8"
kernel_shape = (7, 7, 3, 64)
kernel_dtype = "int8"
_, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=8,
kernel_zero_point=3,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(7, 7),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
func = qnn_func["main"].body
bias = relay.var("bias", shape=(64,), dtype="int32")
bias2 = relay.var("bias2", shape=(1, 225, 225, 1), dtype="int32")
func = relay.add(func, bias2)
func = relay.add(bias2, func)
func = relay.add(bias, func)
func = relay.add(func, bias)
        func = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512")
def test_depthwise_depth_multiplier():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 4, 16, 16)
data_dtype = "uint8"
kernel_shape = (4, 1, 3, 3)
kernel_dtype = "uint8"
input_zero_point = relay.op.multiply(
relay.const(2, dtype="int32"), relay.const(2, dtype="int32")
)
kernel_zero_point = relay.const(np.random.randint(10, size=[4]), "int32")
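        # groups == input channels makes this a depthwise convolution; the later
        # cases use channels > groups to test a depth multiplier greater than one.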
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(3, 3),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
groups=4,
channels=4,
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (10, 4, 16, 16)
data_dtype = "uint8"
kernel_shape = (4, 2, 3, 3)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(3, 3),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32", |
groups=4,
channels=8,
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 16, 16, 4)
data_dtype = "uint8"
kernel_shape = (3, 3, 4, 1)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(3, 3),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
groups=4,
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
data_shape = (2, 16, 16, 4)
data_dtype = "uint8"
kernel_shape = (3, 3, 4, 2)
kernel_dtype = "uint8"
ref_func, qnn_func = get_funcs(
data_shape=data_shape,
data_dtype=data_dtype,
kernel_shape=kernel_shape,
kernel_dtype=kernel_dtype,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=1.0,
kernel_scale=1.0,
kernel_size=(3, 3),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype="int32",
groups=4,
channels=8,
)
verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_per_channel_kernel_scale():
with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
data_shape = (2, 1, 2, 4)
data_dtype = "uint8"
kernel_shape = (3, 1, 2, 2)
        kernel_dtype = "uint8"
data = relay.var("data", shape=data_shape, dtype=data_dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
kernel_scales = [2, 2, 2]
kernel_scales = relay.const(np.array(kernel_scales).astype("float32"))
func = relay.qnn.op.conv2d(
data,
kernel,
input_zero_point=relay.const(0, "int32"),
kernel_zero_point=relay.const(0, "int32"),
input_scale=relay.const(2.0, "float32"),
kernel_scale=kernel_scales,
kernel_size=(2, 2),
channels=kernel_shape[0],
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
mod = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(mod)
if __name__ == "__main__":
test_no_zero_point()
test_input_zero_point()
test_kernel_zero_point()
test_both_zero_point()
test_layout()
test_padding()
test_dilation()
test_const_folding()
test_kernel_size_1x1()
test_kernel_size_1x1_strides_2()
test_tflite_large_irregular()
test_broadcast_layout()
test_tflite_output_multiplier_greater_than_one()
    test_tflite_anisotropic_strides()
test_depthwise_depth_multiplier()
    test_per_channel_kernel_scale()
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing.temp_op_attr import TempOpAttr
def legalize_qnn_dense(attrs, inputs, types):
return None
def make_requantize_params(input_scale, output_scale, output_zero_point, out_dtype):
config = {
"input_scale": input_scale,
"output_scale": output_scale,
"output_zero_point": output_zero_point,
"out_dtype": out_dtype,
}
return config
def make_configuration(
quantized_data,
quantized_kernel,
dtype,
input_shape,
kernel_shape,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
units,
output,
out_dtype="int32",
bias=None,
requantize=None,
):
if requantize is not None:
assert bias is not None
config = {
"quantized_data": quantized_data,
"quantized_kernel": quantized_kernel,
"dtype": dtype,
"input_shape": input_shape,
"kernel_shape": kernel_shape,
"input_zero_point": input_zero_point,
"kernel_zero_point": kernel_zero_point,
"input_scale": input_scale,
"kernel_scale": kernel_scale,
"units": units,
"output": output,
"out_dtype": out_dtype,
"bias": bias,
"requantize": requantize,
}
return config
def make_int_configuration(use_bias=False, requantize_output=False, per_channel=False):
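    # Builds a small int8 dense workload with hand-computed expected outputs,
    # covering the plain, bias, requantized, and per-channel kernel-scale variants.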
input_shape, kernel_shape, output_shape = (2, 10), (3, 10), (2, 3)
input_zero_point, kernel_zero_point = -1, -1
in_dtype = "int8"
out_dtype = "int32" if not requantize_output else "int8"
units = 3
quantized_data_np = (
np.array([1, 3, 5, 7, 9, 11, 13, 15, -19, -21, 1, 3, 5, 7, 9, 11, 13, -17, 17, -21])
.astype(in_dtype)
.reshape(input_shape)
)
quantized_kernel_np = (
np.array(
[
1,
3,
5,
7,
9,
11,
13,
15,
17,
19,
1,
3,
                5,
7,
9,
11,
13,
15,
17,
19,
1,
3,
5,
7,
9,
11,
13,
15,
17,
19,
]
)
.astype(in_dtype)
.reshape(kernel_shape)
)
input_scale = 0.5
kernel_scale = 0.5
output_scale = 1.0
bias = np.array([4, 8, 12]).astype(out_dtype).reshape((units,)) if use_bias else None
if per_channel:
assert use_bias and requantize_output
kernel_scale = np.array([0.5, 0.3, 0.4], dtype=np.float32)
output = np.array([23, 14, 20, 57, 34, 47])
elif requantize_output:
assert use_bias
output = np.array([23, 24, 25, 57, 58, 59])
elif use_bias:
output = np.array([96, 100, 104, 232, 236, 240])
else:
output = np.array([92, 92, 92, 228, 228, 228])
requant_params = (
make_requantize_params(input_scale * kernel_scale, output_scale, -1, "int8")
if requantize_output
else None
)
output = output.astype(out_dtype).reshape(output_shape)
return make_configuration(
quantized_data=quantized_data_np,
quantized_kernel=quantized_kernel_np,
dtype=in_dtype,
input_shape=input_shape,
kernel_shape=kernel_shape,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=input_scale,
kernel_scale=kernel_scale,
units=units,
output=output,
bias=bias,
requantize=requant_params,
)
def qnn_dense_driver(test_configuration):
in_dtype = test_configuration["dtype"]
out_dtype = test_configuration["out_dtype"]
quantized_data_name = "quantized_data"
quantized_kernel_name = "quantized_kernel"
expected_out_dtype = test_configuration["out_dtype"]
bias_name = "bias"
quantized_data = relay.var(
        quantized_data_name, shape=test_configuration["input_shape"], dtype=in_dtype
)
quantized_kernel = relay.var(
quantized_kernel_name, shape=test_configuration["kernel_shape"], dtype=in_dtype
)
mod = relay.qnn.op.dense(
quantized_data,
quantized_kernel,
relay.const(test_configuration["input_zero_point"], "int32"),
relay.const(test_configuration["kernel_zero_point"], "int32"),
relay.const(test_configuration["input_scale"], "float32"),
relay.const(test_configuration["kernel_scale"], "float32"),
test_configuration["units"],
)
if test_configuration[bias_name] is not None:
bias = relay.var(bias_name, shape=test_configuration["bias"].shape, dtype=out_dtype)
mod = relay.nn.bias_add(mod, bias)
if test_configuration["requantize"] is not None:
requantize_config = test_configuration["requantize"]
mod = relay.qnn.op.requantize(
mod,
input_scale=relay.const(requantize_config["input_scale"], "float32"),
input_zero_point=relay.const(0, "int32"),
output_scale=relay.const(requantize_config["output_scale"], "float32"),
output_zero_point=relay.const(requantize_config["output_zero_point"], "int32"),
out_dtype=requantize_config["out_dtype"],
)
expected_out_dtype = requantize_config["out_dtype"]
mod = relay.Function(relay.analysis.free_vars(mod), mod)
mod = tvm.IRModule.from_expr(mod)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
with tvm.transform.PassContext(opt_level=2):
graph, lib, params = relay.build(mod, "llvm", params=None)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input(quantized_data_name, test_configuration[quantized_data_name])
mod.set_input(quantized_kernel_name, test_configuration[quantized_kernel_name])
if test_configuration[bias_name] is not None:
        mod.set_input(bias_name, test_configuration[bias_name])
mod.set_input(**params)
mod.run()
res = mod.get_output(0).numpy()
np.testing.assert_equal(res, test_configuration["output"])
assert res.dtype == expected_out_dtype
def test_qnn_dense_without_bias():
with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
int32_output_without_bias_params = make_int_configuration(use_bias=False)
qnn_dense_driver(int32_output_without_bias_params)
def test_qnn_dense_with_bias():
with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
int32_output_with_bias_params = make_int_configuration(use_bias=True)
qnn_dense_driver(int32_output_with_bias_params)
def test_qnn_dense_with_requantized_output():
with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
int8_requantized_output_with_bias_params = make_int_configuration(
use_bias=True, requantize_output=True
)
qnn_dense_driver(int8_requantized_output_with_bias_params)
def test_per_channel_weight_scale():
with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
config = make_int_configuration(use_bias=True, requantize_output=True, per_channel=True)
qnn_dense_driver(config)
if __name__ == "__main__":
test_qnn_dense_without_bias()
test_qnn_dense_with_bias()
test_qnn_dense_with_requantized_output()
    test_per_channel_weight_scale()
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing import run_infer_type
def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data, axis):
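    # Build a qnn.dequantize graph, run it on CPU, and check both the values and
    # that the result dtype is float32.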
shape = in_data.shape
input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
input_zero_point = relay.const(quant_args["in_zero_point"], "int32")
input_scale = relay.const(quant_args["in_scale"], "float32")
quantized_output = relay.qnn.op.dequantize(
input_data, input_scale=input_scale, input_zero_point=input_zero_point, axis=axis
)
mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
mod = tvm.IRModule.from_expr(mod)
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(mod, "llvm", params=None)
rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
rt_mod.set_input(input_data=in_data)
rt_mod.set_input(**params)
rt_mod.run()
res = rt_mod.get_output(0).numpy()
np.testing.assert_equal(res, verify_output_data)
assert res.dtype == np.float32
def test_uint8_to_float32():
data = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255]).astype("uint8").reshape((2, 5))
output = (
np.array([-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64])
.astype("float32")
.reshape((2, 5))
)
quant_args = {"in_zero_point": 127, "in_scale": 0.5}
dequantize_test_driver(
in_dtype="uint8", quant_args=quant_args, in_data=data, verify_output_data=output, axis=-1
)
def test_int8_to_float32():
data = (
np.array([-128, -127, -126, -125, -124, 123, 124, 125, 126, 127])
.astype("int8")
.reshape((2, 5))
)
output = (
np.array([-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64])
.astype("float32")
.reshape((2, 5))
)
quant_args = {"in_zero_point": -1, "in_scale": 0.5}
dequantize_test_driver(
in_dtype="int8", quant_args=quant_args, in_data=data, verify_output_data=output, axis=-1
)
def test_scalar_int8_to_float32():
    data = np.array(-128).astype("int8")
output = np.array(-63.5).astype("float32")
quant_args = {"in_zero_point": -1, "in_scale": 0.5}
dequantize_test_driver(
in_dtype="int8", quant_args=quant_args, in_data=data, verify_output_data=output, axis=-1
)
def test_int32_to_float32():
data = np.array([113, 29, -1052]).astype("int32")
output = np.array([0.6550452, 0.16810896, -6.098297]).astype("float32")
quant_args = {"in_zero_point": 0, "in_scale": 0.0057968604}
dequantize_test_driver(
in_dtype="int32", quant_args=quant_args, in_data=data, verify_output_data=output, axis=-1
)
def test_channelwise_axis_1():
data = np.transpose(
np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251]).astype("uint8").reshape((2, 5))
)
output = np.transpose(
np.array([-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32])
.astype("float32")
.reshape((2, 5))
)
quant_args = {
"in_zero_point": np.array([127, 123]).astype("int32"),
"in_scale": np.array([0.5, 0.25]).astype("float32"),
}
dequantize_test_driver(
in_dtype="uint8", quant_args=quant_args, in_data=data, verify_output_data=output, axis=-1
)
def test_channelwise_axis_0():
data = np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251]).astype("uint8").reshape((2, 5))
output = (
np.array([-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32])
.astype("float32")
.reshape((2, 5))
)
quant_args = {
"in_zero_point": np.array([127, 123]).astype("int32"),
"in_scale": np.array([0.5, 0.25]).astype("float32"),
}
dequantize_test_driver(
in_dtype="uint8", quant_args=quant_args, in_data=data, verify_output_data=output, axis=0
)
def test_per_tensor_vector_args():
data = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255]).astype("uint8")
output = np.array([-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64]).astype("float32")
quant_args = {
"in_zero_point": np.array([12 |
7]).astype("int32"),
"in_scale": np.array([0.5]).astype("float32"),
}
dequantize_test_driver(
in_dtype="uint8", quant_args=quant_args, in_data=data, verify_output_data=output, axis=-1
)
def test_dynamic_dequantize():
x = relay.var("x", shape=(1, 2, 3, 4), dtype="int8")
scale_var = relay.var("scale", shape=(), dtype="float32")
zp_var = relay.var("zp", shape=(), dtype="int32")
deq_x = relay.qnn.op.dequantize(x, scale_var * scale_var, zp_var + zp_var)
tt = run_infer_type(deq_x)
assert tt.checked_type == relay.TensorType((1, 2, 3, 4), "float32")
func = relay.Function([x, scale_var, zp_var], deq_x)
data = np.random.uniform(size=(1, 2, 3, 4)).astype("int8")
scale = np.array(1).astype("float32")
zp = np.array(0).astype("int32")
mod = tvm.ir.IRModule.from_expr(func)
for target, dev in tvm.testing.enabled_targets():
with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
lib = relay.build(mod, target=target)
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input(**{"x": data, "scale": scale, "zp": zp})
module.run()
if __name__ == "__main__":
test_uint8_to_float32()
test_int8_to_float32()
test_scalar_int8_to_float32()
test_int32_to_float32()
test_channelwise_axis_1()
test_channelwise_axis_0()
    test_dynamic_dequantize()
import tvm
import numpy as np
from tvm import relay
def dequantize(data, scale, zp):
return scale * (np.asarray(data) - zp)
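# Golden model for qnn.leaky_relu: inputs below the input zero point (negative in
# real space) are scaled by alpha before requantization; all other inputs are
# requantized directly to the output scale and zero point.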
def generate_golden_output(x_data, dequantized_x, alpha, o_scale, o_zero_point, i_zero_point):
prod = np.multiply(dequantized_x, alpha)
prod = np.around(prod / o_scale + o_zero_point)
q_min = np.iinfo(np.uint8).min
q_max = np.iinfo(np.uint8).max
prod = np.clip(prod, q_min, q_max)
requantized = np.clip(np.round(dequantized_x / o_scale + o_zero_point), q_min, q_max)
output = np.where(x_data < i_zero_point, prod, requantized)
return output
def test_qnn_leaky_relu():
data_dtype = "uint8"
input_scale = 0.125
input_zero_point = 60
output_scale = 0.6
output_zero_point = 17
alpha = 0.9
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.qnn.op.leaky_relu(
x=x,
alpha=alpha,
input_scale=relay.const(input_scale, "float32"),
input_zero_point=relay.const(input_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 133, 0, 9)).reshape((1, 4))
x_dequantized = dequantize(x_data, input_scale, input_zero_point)
golden_output = generate_golden_output(
x_data, x_dequantized, alpha, output_scale, output_zero_point, input_zero_point
)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(x_data)
np.testing.assert_equal(op_res.numpy(), golden_output)
if __name__ == "__main__":
    test_qnn_leaky_relu()
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
import tvm.topi.testing
def recover(data, scale, zp):
return scale * (np.asarray(data) - zp)
def generate_golden_output(x_recovered, y_recovered, scale, zp):
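    # Multiply in real space, then quantize: q = clip(round(real / scale + zp), 0, 255).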
mul = x_recovered * y_recovered
output = np.around(mul / scale + zp)
q_min = np.iinfo(np.uint8).min
q_max = np.iinfo(np.uint8).max
return np.clip(output, q_min, q_max)
def test_tflite_same_io_qnn_params():
data_dtype = "uint8"
lhs_scale = rhs_scale = output_scale = 0.00784314
lhs_zero_point = rhs_zero_point = output_zero_point = 127
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_datas = [
np.array((1, 153, 2, 178)).reshape((1, 4)),
np.array((25, 1, 178, 216)).reshape((1, 4)),
np.array((25, 153, 1, 165)).reshape((1, 4)),
]
y_datas = [
np.array((204, 178, 1, 8)).reshape((1, 4)),
np.array((204, 178, 191, 1)).reshape((1, 4)),
np.array((204, 178, 1, 191)).reshape((1, 4)),
]
for i in range(0, 3):
x_data = x_datas[i]
y_data = y_datas[i]
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
        )
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
def test_tflite_different_io_qnn_params():
data_dtype = "uint8"
lhs_scale = 0.0156863
lhs_zero_point = 127
rhs_scale = 0.0117647
rhs_zero_point = 85
output_scale = 0.0235294
output_zero_point = 128
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_datas = [
np.array((76, 140, 153, 172)).reshape((1, 4)),
np.array((133, 140, 146, 153)).reshape((1, 4)),
np.array((76, 140, 172, 146)).reshape((1, 4)),
]
y_datas = [
np.array((136, 119, 128, 17)).reshape((1, 4)),
np.array((136, 119, 111, 94)).reshape((1, 4)),
np.array((136, 119, 17, 128)).reshape((1, 4)),
]
for i in range(0, 3):
x_data = x_datas[i]
y_data = y_datas[i]
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
def test_saturation():
data_dtype = "uint8"
lhs_scale = rhs_scale = output_scale = 0.125
    lhs_zero_point = rhs_zero_point = output_zero_point = 0
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 1, 1, 0)).reshape((1, 4))
y_data = np.array((255, 255, 128, 0)).reshape((1, 4))
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
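    # Second case: same input scales but a doubled output scale, so the product
    # is scaled down during requantization before the saturating cast.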
lhs_scale = rhs_scale = 0.125
output_scale = 0.25
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 1, 1, 0)).reshape((1, 4))
y_data = np.array((255, 255, 127, 0)).reshape((1, 4))
    x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
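    # Third case: different per-input scales whose product (0.5 * 0.25) equals
    # the output scale, so the integer product maps through unscaled.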
lhs_scale = 0.5
rhs_scale = 0.25
output_scale = 0.125
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 0, 1, 0)).reshape((1, 4))
y_data = np.array((0, 128, 64, 0)).reshape((1, 4))
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
if __name__ == "__main__":
test_tflite_same_io_qnn_params()
test_tflite_different_io_qnn_params()
test_saturation() |
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing import run_infer_type
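# Build a qnn.quantize graph, run it on the CPU graph executor, and check both
# the output values and the output dtype.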
def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_output_data):
shape = in_data.shape
input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
output_zero_point = relay.const(quant_args["out_zero_point"])
output_scale = relay.const(quant_args["out_scale"])
quantized_output = relay.qnn.op.quantize(
input_data,
output_scale=output_scale,
output_zero_point=output_zero_point,
axis=axis,
out_dtype=out_dtype,
)
mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
mod = tvm.IRModule.from_expr(mod)
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(mod, "llvm", params=None)
rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
rt_mod.set_input(input_data=in_data)
rt_mod.set_input(**params)
rt_mod.run()
res = rt_mod.get_output(0).numpy()
np.testing.assert_equal(res, verify_output_data)
assert res.dtype == out_dtype
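# scale=0.5, zero_point=127: -63.5 quantizes to 0 and 64 saturates to 255.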
def test_float32_to_uint8():
data = (
np.array([-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64])
.astype("float32")
.reshape((2, 5))
)
output = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255]).astype("uint8").reshape((2, 5))
quant_args = {"out_zero_point": np.int32(127), "out_scale": np.float32(0.5)}
quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="uint8",
in_data=data,
verify_output_data=output,
)
def test_float32_to_int8():
data = (
np.array([-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64])
.astype("float32")
.reshape((2, 5))
)
output = (
np.array([-128, -127, -126, -125, -124, 123, 124, 125, 126, 127])
.astype("int8")
.reshape((2, 5))
)
quant_args = {"out_zero_point": np.int32(-1), "out_scale": np.float32(0.5)}
    quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="int8",
in_data=data,
verify_output_data=output,
)
def test_scalar_float32_to_int8():
data = np.array(-63.5).astype("float32")
output = np.array(-128).astype("int8")
quant_args = {"out_zero_point": np.int32(-1), "out_scale": np.float32(0.5)}
quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="int8",
in_data=data,
verify_output_data=output,
)
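# Per-channel quantization along axis 0: each row of the (2, 5) input gets its
# own scale and zero point.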
def test_channelwise_axis_0():
data = (
np.array([-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32])
.astype("float32")
.reshape((2, 5))
)
output = np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251]).astype("uint8").reshape((2, 5))
quant_args = {
"out_zero_point": np.array([127, 123]).astype("int32"),
"out_scale": np.array([0.5, 0.25]).astype("float32"),
}
quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=0,
out_dtype="uint8",
in_data=data,
verify_output_data=output,
)
def test_channelwise_axis_1():
data = np.transpose(
np.array([-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32])
.astype("float32")
.reshape((2, 5))
)
output = np.transpose(
np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251]).astype("uint8").reshape((2, 5))
)
quant_args = {
"out_zero_point": np.array([127, 123]).astype("int32"),
"out_scale": np.array([0.5, 0.25]).astype("float32"),
}
quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="uint8",
in_data=data,
verify_output_data=output,
)
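# Scale and zero point are graph inputs rather than constants, so type
# inference and lowering must handle non-constant quantization parameters.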
def test_dynamic_quantize():
x = relay.var("x", shape=(1, 2, 3, 4), dtype="float32")
scale_var = relay.var("scale", shape=(), dtype="float32")
    zp_var = relay.var("zp", shape=(), dtype="int32")
q_x = relay.qnn.op.quantize(x, scale_var * scale_var, zp_var + zp_var)
tt = run_infer_type(q_x)
assert tt.checked_type == relay.TensorType((1, 2, 3, 4), "int8")
func = relay.Function([x, scale_var, zp_var], q_x)
data = np.random.uniform(size=(1, 2, 3, 4)).astype("float32")
scale = np.array(1).astype("float32")
zp = np.array(0).astype("int32")
mod = tvm.ir.IRModule.from_expr(func)
for target, dev in tvm.testing.enabled_targets():
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
lib = relay.build(mod, target=target)
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input(**{"x": data, "scale": scale, "zp": zp})
module.run()
if __name__ == "__main__":
test_float32_to_uint8()
test_float32_to_int8()
test_scalar_float32_to_int8()
test_channelwise_axis_0()
test_channelwise_axis_1()
test_dynamic_quantize() |
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
roundings = ["UPWARD", "TONEAREST"]
compute_dtypes = ["float32", "float64", "int64"]
def verify(mod, goldens, target="llvm"):
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(mod, target, params=None)
golden_data, golden_output = goldens
rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
rt_mod.set_input("input_data", golden_data)
rt_mod.set_input(**params)
rt_mod.run()
res = rt_mod.get_output(0).numpy()
np.testing.assert_equal(res, golden_output)
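# Build a qnn.requantize module. Scales and zero points may be scalars or
# per-channel arrays; rounding and compute_dtype default to the string "None",
# which defers to the active requantize_config.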
def get_mod(
data_shape,
data_dtype,
out_dtype,
input_scale,
output_scale,
input_zero_point=0,
output_zero_point=0,
rounding="None",
compute_dtype="None",
axis=0,
):
input_data = relay.var("input_data", shape=data_shape, dtype=data_dtype)
if isinstance(input_scale, float):
input_scale_expr = relay.const(input_scale, "float32")
else:
input_scale_expr = relay.const(np.array(input_scale).astype("float32"))
if isinstance(input_zero_point, float):
input_zero_point_expr = relay.const(input_zero_point, "int32")
else:
input_zero_point_expr = relay.const(np.array(input_zero_point).astype("int32"))
mod = relay.qnn.op.requantize(
input_data,
input_scale=input_scale_expr,
input_zero_point=input_zero_point_expr,
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
axis=axis,
rounding=rounding,
compute_dtype=compute_dtype,
out_dtype=out_dtype,
)
mod = relay.Function(relay.analysis.free_vars(mod), mod)
mod = tvm.IRModule.from_expr(mod)
return mod
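# Equal input and output scales: the lowered IR must contain no right_shift.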
def test_same_scale():
golden_data = np.arange(-100, 100, 1).astype("int32")
golden_output = golden_data
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(200,), |
data_dtype="int32",
out_dtype="int8",
input_scale=0.5,
output_scale=0.5,
rounding=rounding,
compute_dtype=compute_dtype,
)
assert "right_shift" not in mod.astext()
verify(mod, (golden_data, golden_output))
def test_scalar_same_scale():
golden_data = np.array(-10).astype("int32")
golden_output = golden_data
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(),
data_dtype="int32",
out_dtype="int8",
input_scale=0.5,
output_scale=0.5,
rounding=rounding,
compute_dtype=compute_dtype,
)
assert "right_shift" not in mod.astext()
verify(mod, (golden_data, golden_output))
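# Downscale by 16 and then by 4; negative inputs distinguish UPWARD from
# TONEAREST rounding at the halfway points.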
def test_downscale():
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
rounding=rounding,
compute_dtype=compute_dtype,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.repeat([0, 1, 2], [8, 16, 8])
verify(mod, (golden_data, golden_output))
golden_data = np.arange(0, -32, -1).astype("int32")
if rounding == "UPWARD":
golden_output = np.repeat([0, -1, -2], [9, 16, 7])
else:
golden_output = np.repeat([0, -1, -2], [8, 16, 8])
verify(mod, (golden_data, golden_output))
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=4, |
rounding=rounding,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.repeat([0, 1, 2, 3, 4, 5, 6, 7, 8], [2, 4, 4, 4, 4, 4, 4, 4, 2])
verify(mod, (golden_data, golden_output))
golden_data = np.arange(0, -32, -1).astype("int32")
if rounding == "UPWARD":
golden_output = np.repeat(
[0, -1, -2, -3, -4, -5, -6, -7, -8], [3, 4, 4, 4, 4, 4, 4, 4, 1]
)
else:
golden_output = np.repeat(
[0, -1, -2, -3, -4, -5, -6, -7, -8], [2, 4, 4, 4, 4, 4, 4, 4, 2]
)
verify(mod, (golden_data, golden_output))
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="uint8",
input_scale=1,
output_scale=16,
rounding=rounding,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.repeat([0, 1, 2], [8, 16, 8])
verify(mod, (golden_data, golden_output))
mod = get_mod(
data_shape=(32,),
data_dtype="uint8",
out_dtype="uint8",
input_scale=1,
output_scale=16,
rounding=rounding,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.repeat([0, 1, 2], [8, 16, 8])
verify(mod, (golden_data, golden_output))
def test_upscale():
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=2,
output_scale=1,
rounding=rounding, |
compute_dtype=compute_dtype,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.multiply(2, golden_data)
verify(mod, (golden_data, golden_output))
golden_data = np.arange(0, -32, -1).astype("int32")
golden_output = np.multiply(2, golden_data)
verify(mod, (golden_data, golden_output))
def test_non_power_of_two():
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=3,
rounding=rounding,
compute_dtype=compute_dtype,
)
golden_data = np.multiply(np.arange(0, 32, 1).astype("int32"), 3)
golden_output = np.arange(0, 32, 1)
verify(mod, (golden_data, golden_output))
golden_data = np.multiply(np.arange(0, -32, -1).astype("int32"), 3)
golden_output = np.arange(0, -32, -1)
verify(mod, (golden_data, golden_output))
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=3,
output_scale=1,
rounding=rounding,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.multiply(golden_data, 3)
verify(mod, (golden_data, golden_output))
golden_data = np.arange(0, -32, -1).astype("int32")
golden_output = np.multiply(golden_data, 3)
verify(mod, (golden_data, golden_output))
def test_saturation():
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(16,),
                data_dtype="int32",
out_dtype="int8",
input_scale=0.5,
output_scale=0.5,
rounding=rounding,
compute_dtype=compute_dtype,
)
golden_data = np.arange(0, 16, 1).astype("int32")
golden_data = np.add(120, golden_data)
output = np.array(
[120, 121, 122, 123, 124, 125, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127]
)
golden_output = output
verify(mod, (golden_data, golden_output))
golden_data = np.arange(0, -16, -1).astype("int32")
golden_data = np.add(-120, golden_data)
output = np.array(
[
-120,
-121,
-122,
-123,
-124,
-125,
-126,
-127,
-128,
-128,
-128,
-128,
-128,
-128,
-128,
-128,
]
)
golden_output = output
verify(mod, (golden_data, golden_output))
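# Nonzero zero points: first on the output side, then on the input side; each
# shifts the requantized result by one step.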
def test_zero_point():
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
output_zero_point=1,
rounding=rounding,
compute_dtype=compute_dtype,
)
golden_data = np.arange(0, 32, 1).astype("int32")
golden_output = np.repeat([0, 1, 2], [8, 16, 8])
golden_output = np.add(1, golden_output)
verify(mod, (golden_data, golden_output))
            golden_data = np.arange(-32, -64, -1).astype("int32")
if rounding == "UPWARD":
golden_output = np.repeat([-2, -3, -4], [9, 16, 7])
else:
golden_output = np.repeat([-2, -3, -4], [8, 16, 8])
golden_output = np.add(1, golden_output)
verify(mod, (golden_data, golden_output))
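    # Same downscale, but with the offset applied on the input side instead.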
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
input_zero_point=16,
rounding=rounding,
compute_dtype=compute_dtype,
)
golden_data = np.arange(32, 64, 1).astype("int32")
golden_output = np.repeat([2, 3, 4], [8, 16, 8])
golden_output = np.subtract(golden_output, 1)
verify(mod, (golden_data, golden_output))
golden_data = np.arange(-32, -64, -1).astype("int32")
if rounding == "UPWARD":
golden_output = np.repeat([-2, -3, -4], [9, 16, 7])
else:
golden_output = np.repeat([-2, -3, -4], [8, 16, 8])
golden_output = np.subtract(golden_output, 1)
verify(mod, (golden_data, golden_output))
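# Per-channel input scales that all equal the output scale reduce to identity.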
def test_per_channel_same_scale():
golden_data = np.arange(-5, 5, 1).astype("int32").reshape((5, 2))
golden_output = golden_data
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(5, 2),
data_dtype="int32",
out_dtype="int8",
input_scale=[0.5, 0.5],
output_scale=0.5,
axis=1,
rounding=rounding,
compute_dtype=compute_dtype,
)
verify(mod, (golden_data, golden_output))
golden_data = np.arange(-10, 10, 1).astype("int32").reshape((2, 2, 5))
    golden_output = golden_data
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(2, 2, 5),
data_dtype="int32",
out_dtype="int8",
input_scale=[0.5, 0.5],
output_scale=0.5,
axis=1,
rounding=rounding,
compute_dtype=compute_dtype,
)
verify(mod, (golden_data, golden_output))
def test_per_channel_different_scale():
golden_data = np.arange(-5, 5, 1).astype("int32").reshape((5, 2))
golden_output = np.array([-5, -2, -3, -1, -1, 0, 1, 1, 3, 2]).reshape((5, 2))
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(5, 2),
data_dtype="int32",
out_dtype="int8",
input_scale=[0.5, 0.25],
output_scale=0.5,
axis=1,
rounding=rounding,
compute_dtype=compute_dtype,
)
verify(mod, (golden_data, golden_output))
golden_data = np.arange(-20, 20, 2).astype("int32").reshape((2, 2, 5))
golden_output = np.array(
[-20, -18, -16, -14, -12, -5, -4, -3, -2, -1, 0, 2, 4, 6, 8, 5, 6, 7, 8, 9]
).reshape((2, 2, 5))
for compute_dtype in compute_dtypes:
for rounding in roundings:
mod = get_mod(
data_shape=(2, 2, 5),
data_dtype="int32",
out_dtype="int8",
input_scale=[0.5, 0.25],
output_scale=0.5,
axis=1,
rounding=rounding,
compute_dtype=compute_dtype,
)
verify(mod, (golden_data, golden_output))
golden_data = np.arange(-5, 5, 1).astype("int32").reshape((5, 2))
golden_output = np.array([-10, -2, -6, -1, -2, 0, 2, 1, 6, 2]).reshape((5, 2))
for compute_dtype in compute_dtypes:
for rounding in roundings:
            mod = get_mod(
data_shape=(5, 2),
data_dtype="int32",
out_dtype="int8",
input_scale=[1.0, 0.25],
output_scale=0.5,
axis=1,
rounding=rounding,
compute_dtype=compute_dtype,
)
verify(mod, (golden_data, golden_output))
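# The remaining tests cover requantize_config scoping: with no explicit
# rounding argument the config (or the UPWARD default) applies, and an explicit
# argument always takes precedence over the enclosing config.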
def test_default_cfg_and_no_args():
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
)
golden_data = np.arange(0, -32, -1).astype("int32")
golden_output = np.repeat([0, -1, -2], [9, 16, 7])
verify(mod, (golden_data, golden_output))
def test_non_default_cfg_and_no_args():
for rounding_cfg in roundings:
with relay.qnn.op.requantize_config(rounding=rounding_cfg):
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
)
golden_data = np.arange(0, -32, -1).astype("int32")
if rounding_cfg == "UPWARD":
golden_output = np.repeat([0, -1, -2], [9, 16, 7])
else:
golden_output = np.repeat([0, -1, -2], [8, 16, 8])
verify(mod, (golden_data, golden_output))
def test_default_cfg_and_args():
for rounding in roundings:
with relay.qnn.op.requantize_config(rounding="UPWARD"):
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
rounding=rounding,
)
golden_data = np.arange(0, -32, -1).astype("int32")
if rounding == "UPWARD":
golden_output = np.repeat([0, -1, -2], [9, 16, 7])
else:
golden_output = np.repeat([0, -1, -2], [8, 16, 8])
            verify(mod, (golden_data, golden_output))
def test_non_default_cfg_and_args():
for rounding_arg in roundings:
for rounding_cfg in roundings:
with relay.qnn.op.requantize_config(rounding=rounding_cfg):
mod = get_mod(
data_shape=(32,),
data_dtype="int32",
out_dtype="int8",
input_scale=1,
output_scale=16,
rounding=rounding_arg,
)
golden_data = np.arange(0, -32, -1).astype("int32")
if rounding_arg == "UPWARD":
golden_output = np.repeat([0, -1, -2], [9, 16, 7])
else:
golden_output = np.repeat([0, -1, -2], [8, 16, 8])
verify(mod, (golden_data, golden_output))
if __name__ == "__main__":
test_same_scale()
test_scalar_same_scale()
test_downscale()
test_upscale()
test_non_power_of_two()
test_saturation()
test_zero_point()
test_per_channel_same_scale()
test_per_channel_different_scale()
test_default_cfg_and_no_args()
test_non_default_cfg_and_no_args()
test_default_cfg_and_args()
test_non_default_cfg_and_args() |
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.runtime.vm import VirtualMachine
from tvm.topi.nn.qnn import SQNN_DTYPE_TO_CODE
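# Reference path: build and run an ordinary qnn.dequantize graph on CPU so its
# output can be compared against simulated_dequantize below.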
def dequantize_test_driver(in_dtype, quant_args, axis, in_data):
shape = in_data.shape
input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
input_zero_point = relay.const(quant_args["in_zero_point"])
input_scale = relay.const(quant_args["in_scale"])
dequantized_output = relay.qnn.op.dequantize(
input_data,
input_scale=input_scale,
input_zero_point=input_zero_point,
axis=axis,
)
mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output)
mod = tvm.IRModule.from_expr(mod)
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(mod, "llvm", params=None)
rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
rt_mod.set_input(input_data=in_data)
rt_mod.set_input(**params)
rt_mod.run()
res = rt_mod.get_output(0).numpy()
return res
def build_simulated_dequantize(input_data, scale, zp, dtype, axis=-1):
sim_q = relay.qnn.op.simulated_dequantize(
input_data,
scale,
zp,
axis=axis,
in_dtype=dtype,
)
mod = tvm.IRModule.from_expr(sim_q)
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, "llvm", params=None)
vm = VirtualMachine(vm_exec, tvm.cpu(0))
return vm
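# simulated_dequantize works on float tensors and receives the source dtype as
# a runtime value (via SQNN_DTYPE_TO_CODE), so one compiled VM can serve
# multiple dtypes.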
def verify_simulated_dequantize_simple(dtype):
data = np.random.uniform(low=-128, high=127, size=[2, 5]).astype(dtype)
data_fp = data.astype("float32")
scale_np = np.float32(0.5)
zp_np = np.int32(127)
dtype_np = np.int32(SQNN_DTYPE_TO_CODE[dtype])
quant_args = {"in_zero_point": zp_np, "in_scale": scale_np}
dq_out = dequantize_test_driver(
in_dtype=dtype,
quant_args=quant_args,
axis=-1,
in_data=data,
)
input_data = relay.var("input_data", shape=data.shape, dtype="float32")
scale = relay.var("scale", shape=[])
zp = relay.var("zp", shape=[], dtype="int32")
    dtype = relay.var("dtype", shape=[], dtype="int32")
vm = build_simulated_dequantize(input_data, scale, zp, dtype)
sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
def test_simulated_dequantize():
verify_simulated_dequantize_simple("uint8")
verify_simulated_dequantize_simple("int8")
verify_simulated_dequantize_simple("int32")
def test_dynamic_channels():
data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("int8")
data_fp = data.astype("float32")
scale_np = np.asarray([0.5]).astype("float32")
zp_np = np.asarray([0]).astype("int32")
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int8"])
quant_args = {"in_zero_point": zp_np[0], "in_scale": scale_np[0]}
dq_out = dequantize_test_driver(
in_dtype="int8",
quant_args=quant_args,
axis=0,
in_data=data,
)
input_data = relay.var("input_data", shape=data.shape, dtype="float32")
scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
dtype = relay.var("dtype", shape=[], dtype="int32")
vm = build_simulated_dequantize(input_data, scale, zp, dtype, axis=0)
sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
scale_np = np.array([0.5, 0.25]).astype("float32")
zp_np = np.array([127, 123]).astype("int32")
quant_args = {"in_zero_point": zp_np, "in_scale": scale_np}
dq_out = dequantize_test_driver(
in_dtype="int8",
quant_args=quant_args,
axis=0,
in_data=data,
)
sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
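# The dtype code is itself a runtime input: the same VM dequantizes uint8 data
# and then int8 data without recompilation.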
def test_dynamic_dtype():
data = np.random.uniform(low=0, high=255, size=[2, 5]).astype("uint8")
    data_fp = data.astype("float32")
scale_np = np.asarray([0.5]).astype("float32")
zp_np = np.asarray([127]).astype("int32")
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
quant_args = {"in_zero_point": zp_np[0], "in_scale": scale_np[0]}
dq_out = dequantize_test_driver(
in_dtype="uint8",
quant_args=quant_args,
axis=-1,
in_data=data,
)
input_data = relay.var("input_data", shape=data.shape, dtype="float32")
scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
dtype = relay.var("dtype", shape=[], dtype="int32")
vm = build_simulated_dequantize(input_data, scale, zp, dtype)
sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
data = np.random.uniform(low=0, high=255, size=[2, 5]).astype("int8")
data_fp = data.astype("float32")
dq_out = dequantize_test_driver(
in_dtype="int8",
quant_args=quant_args,
axis=-1,
in_data=data,
)
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int8"])
sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
if __name__ == "__main__":
test_simulated_dequantize()
test_dynamic_channels()
test_dynamic_dtype() |
import tvm
from tvm import te
import numpy as np
from tvm import relay