from typing import Callable
import numpy as np
from tvm import relay
import canonicalizations
class TestIntegerTableLookupTable:
"""Consists of tests testing functionality of creating lookup tables for integer operations."""
def fake_identity_func_numpy(self, arr: np.ndarray):
return arr.astype("float32")
def fake_identity_func_relay(
self,
floating_point_func: Callable[[np.ndarray], np.ndarray],
input_arg=None,
in_scale=relay.const(1.0, dtype="float32"),
in_zero_point=relay.const(0, dtype="int32"),
out_scale=relay.const(1.0, dtype="float32"),
out_zero_point=relay.const(0, dtype="int32"),
in_axis=-1,
out_axis=-1,
in_dtype="uint8",
out_dtype="uint8",
):
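# When no explicit input is given, enumerate every possible 8-bit pattern
# (0..255 as uint8) and reinterpret it as in_dtype via .view, so the lookup
# op is exercised over its full input domain.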
if input_arg is None:
input_arg = relay.const(np.arange(0, 256, dtype="uint8").view(in_dtype))
return (
canonicalizations.create_integer_lookup_op(
input_arg=input_arg,
floating_point_func=floating_point_func,
in_scale=in_scale,
in_zero_point=in_zero_point,
out_scale=out_scale,
out_zero_point=out_zero_point,
in_axis=in_axis,
out_axis=out_axis,
in_dtype=in_dtype,
out_dtype=out_dtype,
),
input_arg.data.numpy(),
)
def dequantize_numpy(self, np_arr, np_scale=1.0, np_zero_point=0):
return (np_arr.astype("int32") - np_zero_point) * np_scale
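# Affine dequantization, as implemented above: real = (q - zero_point) * scale.
# For example, with scale=0.5 and zero_point=128, a uint8 value of 130
# dequantizes to (130 - 128) * 0.5 == 1.0.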
def run_function_test(
self,
in_scale: float,
in_zero_point: int,
out_scale: float,
out_zero_point: int,
in_dtype: str,
out_dtype: str,
floating_point_func: Callable[[np.ndarray], np.ndarray],
input_arg: relay.Expr = None,
rtol=1e-7,
atol=0,
):
relay_lookup, input_arg = self.fake_identity_func_relay(
input_arg=input_arg,
floating_point_func=floating_point_func,
in_scale=relay.const(in_scale, "float32"),
in_zero_point=relay.const(in_zero_point, "int32"),
out_scale=relay.const(out_scale, "float32"),
out_zero_point=relay.const(out_zero_point, "int32"),
in_dtype=in_dtype,
out_dtype=out_dtype,
)
result = canonicalizations.run_const_expr(relay_lookup)
np.testing.assert_allclose(
floating_point_func(
self.dequantize_numpy(input_arg, np_scale=in_scale, np_zero_point=in_zero_point)
),
self.dequantize_numpy(result, np_scale=out_scale, np_zero_point=out_zero_point),
atol=atol,
rtol=rtol,
)
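# The oracle above: for each quantized input x, the lookup output should
# satisfy dequant_out(lookup(x)) ~= f(dequant_in(x)) within atol/rtol.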
"""Test mapping between different input/output dtypes"""
def test_int8_to_int8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=0,
out_scale=1.0,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
)
def test_uint8_to_uint8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=128,
out_scale=1.0,
out_zero_point=128,
in_dtype="uint8",
out_dtype="uint8",
floating_point_func=self.fake_identity_func_numpy,
)
def test_int8_to_uint8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=0,
out_scale=1.0,
out_zero_point=128,
in_dtype="int8",
out_dtype="uint8",
floating_point_func=self.fake_identity_func_numpy,
)
def test_uint8_to_int8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=128,
out_scale=1.0,
out_zero_point=0,
in_dtype="uint8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
)
"""Test different input shapes"""
def test_keep_input_shapes(self):
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 2, 8, 8])),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
atol=0.03,
rtol=0.01,
)
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 2, 64])),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
atol=0.03,
rtol=0.01,
)
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 128])),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
atol=0.03,
rtol=0.01,
)
"""Test mapping with different in/out qparams works."""
def test_different_in_out_qparams(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=128,
out_scale=1.0,
out_zero_point=128,
in_dtype="uint8",
out_dtype="uint8",
floating_point_func=self.fake_identity_func_numpy,
atol=1,
rtol=0,
)
"""Test some simple functions"""
def test_tanh(self):
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8")),
in_scale=1 / 64,
in_zero_point=0,
out_scale=1 / 128,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=np.tanh,
atol=0.01,
rtol=0.01,
)
def test_exp(self):
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8")),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=np.exp,
atol=0.03,
rtol=0.01,
)
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicPoolTests:
@tvm.testing.requires_corstone300
def test_pool(
self,
pool_type,
shape,
dtype,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
schedule_name,
):
"""Test a subgraph with a single pool operator."""
ishape = shape
input0 = relay.var("input", relay.TensorType(ishape, dtype))
out0 = getattr(relay.op.nn, pool_type)(
input0,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
out1 = getattr(relay.op.nn, pool_type)(
input1,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestAvgPool1d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
(
shape,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
) = tvm.testing.parameters(
((3, 32, 27), (3,), (2,), 0, 1, "NCW", False, False),
((3, 32, 27), (3,), (2,), 0, 1, "NWC", False, False),
((3, 32, 27), (3,), (2,), 0, 1, "NCW", True, False),
((3, 32, 27), (3,), (2,), 1, 1, "NCW", False, True),
((1, 1, 32), 3, 1, 0, 1, "NCW", False, False),
((1, 4, 20), 3, 2, 2, 1, "NCW", False, False),
)
pool_type = tvm.testing.parameter("avg_pool1d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestAvgPool2d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
(
shape,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
) = tvm.testing.parameters(
((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NHWC", False, False),
((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, False),
((2, 27, 27, 16), (3, 3), (2, 2), 0, 1, "NHWC", True, False),
((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, True),
((1, 25, 5, 64), (25, 5), (25, 5), 0, 1, "NHWC", False, False),
((1, 3, 3, 256), (3, 3), (3, 3), 0, 1, "NHWC", False, False),
((1, 8, 8, 64), (8, 8), (8, 8), 0, 1, "NHWC", False, False),
((1, 1, 32, 32), (3, 3), 1, 0, 1, "NCHW", False, False),
((1, 4, 32, 20), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
)
pool_type = tvm.testing.parameter("avg_pool2d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestAvgPool3d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
(
shape,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
) = tvm.testing.parameters(
((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False, False),
)
pool_type = tvm.testing.parameter("avg_pool3d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicConv1dTests:
@tvm.testing.requires_corstone300
def test_conv1d(
self,
data_shape,
kernel_size,
num_filter,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single conv1d_ncw operator."""
ishape = data_shape
wshape = (num_filter, data_shape[1], kernel_size)
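# OIW kernel layout: (out_channels, in_channels, kernel_width).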
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.conv1d(
input0,
weight0,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NCW",
kernel_layout="OIW",
out_dtype="int32",
out_layout="NCW",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.conv1d(
input1,
weight1,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NCW",
kernel_layout="OIW",
out_dtype="int32",
out_layout="NCW",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestConv1d_ncw(BasicConv1dTests):
"""This test is for conv1d_ncw.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
((4, 32, 16), 3, 12, 1, 0, 1),
((4, 16, 32), 3, 12, 1, 0, 1),
((1, 12, 32), 3, 16, 1, 0, 1),
((3, 10, 12), 4, 24, 1, 0, 1),
((1, 7, 7), 3, 5, 1, 0, 1),
((1, 2, 10), 4, 4, 2, (1, 1), 1),
((1, 2, 20), 4, 4, 2, (0, 1), 1),
((1, 4, 16), 1, 12, 1, (1, 0), 1),
((1, 16, 24), 1, 32, 3, (2, 2), 1),
)
dtype = tvm.testing.parameter("int8", "int16")
data_layout = tvm.testing.parameter("NCW")
schedule_name = tvm.testing.parameter("conv1d_ncw.generic")
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicConv1dTests:
@tvm.testing.requires_corstone300
def test_conv1d(
self,
data_shape,
kernel_size,
kernel_layout,
num_filter,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single conv1d_nwc operator."""
ishape = data_shape
wshape = (kernel_size, data_shape[-1], num_filter)
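# WIO kernel layout: (kernel_width, in_channels, out_channels).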
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.conv1d(
input0,
weight0,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NWC",
kernel_layout="WIO",
out_dtype="int32",
out_layout="NWC",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
if kernel_layout == "WOI":
weight1 = relay.const(np.moveaxis(weight_data, 1, -1))
else:
weight1 = relay.const(weight_data)
out1 = relay.op.nn.conv1d(
input1,
weight1,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NWC",
kernel_layout=kernel_layout,
out_dtype="int32",
out_layout="NWC",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestConv1d_dsp(BasicConv1dTests):
"""This test is for conv1d_dsp schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
((4, 32, 16), 3, 12, 1, 0, 1),
((4, 16, 32), 3, 12, 1, 0, 1),
((4, 32, 16), 3, 12, 1, 0, 1),
((1, 32, 12), 3, 16, 1, 0, 1),
((1, 16, 4), 1, 12, 1, (1, 0), 1),
((1, 24, 16), 1, 32, 3, (2, 2), 1),
)
dtype = tvm.testing.parameter("int8", "int16")
data_layout = tvm.testing.parameter("NWC")
kernel_layout = tvm.testing.parameter("WOI")
schedule_name = tvm.testing.parameter("conv1d_dsp")
class TestConv1d_nwc(BasicConv1dTests):
"""This test is for conv1d_nwc.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
((4, 32, 16), 3, 12, 1, 0, 1),
((4, 16, 32), 3, 12, 1, 0, 1),
((4, 32, 16), 3, 12, 1, 0, 1),
((1, 32, 12), 3, 16, 1, 0, 1),
((3, 12, 10), 4, 24, 1, 0, 1),
((1, 7, 7), 3, 5, 1, 0, 1),
((1, 10, 2), 4, 4, 2, (1, 1), 1),
((1, 20, 2), 4, 4, 2, (0, 1), 1),
((1, 16, 4), 1, 12, 1, (1, 0), 1),
((1, 24, 16), 1, 32, 3, (2, 2), 1),
)
dtype = tvm.testing.parameter("int8", "int16")
data_layout = tvm.testing.parameter("NWC")
kernel_layout = tvm.testing.parameter("WIO")
schedule_name = tvm.testing.parameter("conv1d_nwc.generic")
if __name__ == "__main__":
tvm.testing.main()
"""Tests for arm_cpu schedules for regular conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import fixture, main, parameter, parameters
class Conv2dTests(GeneralizedConv2dTests):
"""Helper for constructing regular Conv2ds. Always sets groups to 1. We set the reference
kernel layout here as we must pick something, but the x86 implementation supports several."""
@fixture
def groups(self):
"""Using a fixture instead of a parameter stops Pytest from adding the (redundant) number of
groups to the name of each test."""
return 1
def setup_method(self):
self.ref_kernel_layout = "HWIO"
class TestConv2d_NHWC_DSP(Conv2dTests):
"""This test is for conv2d_nhwc_dsp.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
((1, 16, 16, 32), (1, 1), 64, (2, 2), 0, 1),
((4, 16, 16, 8), (5, 5), 8, 2, (0, 4, 4, 0), 1),
((4, 16, 16, 8), (5, 5), 16, 2, (0, 4, 4, 0), 1),
((4, 16, 16, 8), (5, 5), 8, 2, 0, 1),
((4, 16, 16, 8), (5, 5), 16, 2, 0, 1),
((1, 16, 16, 8), (3, 3), 16, 2, (0, 0, 1, 1), 1),
((1, 16, 16, 8), (3, 3), 16, 2, (1, 1, 2, 2), 1),
((1, 16, 16, 8), (5, 5), 16, 2, (3, 3, 2, 2), 1),
((1, 16, 16, 8), (3, 3), 16, 2, (0, 1, 2, 3), 1),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWOI")
out_layout = parameter("NHWC")
schedule_name = parameter("conv2d_nhwc_dsp.arm_cpu")
class TestConv2d_NHWC_Spatial_Pack(Conv2dTests):
"""This test is for conv2d_nhwc_spatial_pack.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 1), (3, 3), 12, 1, 0, 1),
((1, 32, 10, 3), (3, 3), 16, 1, 0, 1),
((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWIO")
out_layout = parameter("NHWC")
schedule_name = parameter("conv2d_nhwc_spatial_pack.arm_cpu")
class TestConv2d_Tensordot(Conv2dTests):
"""This test is for the regular conv2d schedule tensorized using tensordot."""
data_shape, kernel_size, num_filter, strides, padding = parameters(
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0)),
((1, 16, 16, 32), (1, 1), 64, (2, 2), 0),
((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1)),
((4, 16, 16, 16), (5, 5), 8, 2, 0),
)
dilation = parameter(1)
in_dtype = parameter("int8", "int16", "int32")
data_layout = parameter("NHWC")
kernel_layout = parameter("OHWI")
out_layout = parameter("NHWC", "NCHW")
schedule_name = parameter("conv2d_nhwc_ohwi_dsp.arm_cpu")
class TestConv2d_NCHW_Spatial_Pack(Conv2dTests):
"""This test is for conv2d_nchw_spatial_pack.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation, in_dtype = parameters(
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1, "int8"),
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1, "int16"),
((1, 16, 16, 32), (3, 3), 12, 1, 0, 1, "int16"),
)
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NCHW")
schedule_name = parameter("conv2d_nchw_spatial_pack.arm_cpu")
if __name__ == "__main__":
main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicConv2dTests:
@tvm.testing.requires_corstone300
def test_conv2d_NCHWc(
self,
data_shape,
kernel_size,
data_layout,
kernel_layout,
num_filter,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single conv2d_NCHWc operator."""
ishape = data_shape
wshape = (num_filter, data_shape[1], *kernel_size)
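# Weights start in plain OIHW (out_channels, in_channels, kernel_h, kernel_w)
# and are layout_transformed to the blocked kernel_layout (e.g. OIHW4i4o) below.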
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.contrib_conv2d_nchwc(
relay.layout_transform(input0, "NCHW", data_layout),
relay.layout_transform(weight0, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
channels=num_filter,
out_dtype="",
out_layout="",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.contrib_conv2d_nchwc(
relay.layout_transform(input1, "NCHW", data_layout),
relay.layout_transform(weight1, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
channels=num_filter,
out_dtype="",
out_layout="",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestConv2d_NCHWc(BasicConv2dTests):
"""This test is for conv2d_NCHWc.x86 schedule."""
(
data_shape,
kernel_size,
num_filter,
strides,
padding,
dilation,
dtype,
kernel_layout,
data_layout,
) = tvm.testing.parameters(
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW4i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW4i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW2i8o", "NCHW8c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW2i8o", "NCHW8c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW2i8o", "NCHW8c"),
((1, 64, 28, 28), (3, 3), 64, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (1, 1), 64, (1, 1), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (3, 3), 128, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (1, 1), 128, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 128, 14, 14), (3, 3), 128, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 128, 14, 14), (3, 3), 256, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 128, 14, 14), (1, 1), 256, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 256, 7, 7), (3, 3), 256, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 256, 7, 7), (3, 3), 512, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 256, 7, 7), (1, 1), 512, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 512, 3, 3), (3, 3), 512, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
)
schedule_name = tvm.testing.parameter("conv2d_NCHWc.x86")
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicDenseTests:
@tvm.testing.requires_corstone300
def test_dense(self, shape, weight_shape, dtype, schedule_name, enable_bias):
"""Test a subgraph with a single dense operator."""
ishape = shape
wshape = weight_shape
out_dtype = "int32"
units = weight_shape[0]
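# relay.nn.dense expects weights laid out as (units, in_features), so the
# output width is the first dimension of weight_shape.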
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
if enable_bias:
bias_data = np.random.randint(low=-10, high=10, size=(wshape[0]), dtype=out_dtype)
input = relay.var("input", relay.TensorType(ishape, dtype))
weight = relay.const(weight_data)
dense = relay.op.nn.dense(
input,
weight,
units=units,
out_dtype=out_dtype,
)
if enable_bias:
bias = relay.const(bias_data)
relay_op = relay.op.nn.bias_add(dense, bias)
else:
relay_op = dense
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
ref_mod = tvm.IRModule.from_expr(relay.Function([input], relay_op))
output_list = generate_ref_data(ref_mod, inputs)
mod = tvm.IRModule.from_expr(relay.Function([input], relay_op))
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestDense(BasicDenseTests):
"""This test is for dense_dsp schedule."""
shape, weight_shape = tvm.testing.parameters(
((8, 128), (32, 128)),
((32, 32), (32, 32)),
((1, 64), (1, 64)),
((11, 2), (2, 2)),
((1, 32), (64, 32)),
((3, 12), (10, 12)),
)
dtype = tvm.testing.parameter("int8", "int16")
schedule_name = tvm.testing.parameter("dense_dsp.arm_cpu")
enable_bias = tvm.testing.parameter(False, True)
if __name__ == "__main__":
tvm.testing.main()
"""Tests for arm_cpu schedules for depthwise_conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import fixture, main, parameter, parameters
class DepthwiseConv2dTests(GeneralizedConv2dTests):
"""Helper for constructing depthwise Conv2ds. Sets the reference kernel layout to what x86 code
supports."""
@fixture
def groups(self, data_shape):
"""By definition, a depthwise_conv2d has a number of groups equal to the number of input
channels, so we don't need to specify the number of groups each time."""
return data_shape[3]
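# For example, a depthwise conv over NHWC data of shape (1, 48, 48, 8) uses
# groups == 8, one group per input channel.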
def setup_method(self):
self.ref_kernel_layout = "HWOI"
class TestDepthwiseConv2d_NCHW_OIHW(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nchw.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 10, 3, 32), (3, 3), 32, 1, 0, 1),
((1, 32, 16, 32), (3, 3), 32, 1, (0, 2, 2, 0), 1),
((1, 32, 16, 32), (3, 3), 32, 1, 0, 1),
((1, 32, 16, 32), (3, 3), 32, 1, 0, 1),
((1, 32, 16, 32), (3, 3), 32, 1, (0, 2, 2, 0), 2),
((1, 32, 16, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NCHW")
schedule_name = parameter("depthwise_conv2d_nchw.arm_cpu")
class TestDepthwiseConv2d_NHWC_HWOI(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nhwc.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
((1, 49, 10, 64), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWOI")
out_layout = parameter("NHWC")
schedule_name = parameter("depthwise_conv2d_nhwc.generic")
class TestDepthwiseConv2d_NHWC_HWOI_DSP(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nhwc_dsp.arm_cpu schedule. The tests that are parameterized
by dtype work for both int8 and int16, while the others only work on the specified dtype."""
in_dtype_parameterized_tests = [
((1, 48, 48, 8), (3, 3), 8, (1, 1), 1),
((1, 48, 48, 16), (3, 3), 16, (2, 2), (1, 1, 0, 0)),
((1, 24, 24, 32), (3, 3), 32, (1, 1), 1),
((1, 24, 24, 32), (3, 3), 32, (2, 2), (1, 1, 0, 0)),
((1, 12, 12, 64), (3, 3), 64, (1, 1), 1),
((1, 12, 12, 64), (3, 3), 64, (2, 2), (1, 1, 0, 0)),
((1, 6, 6, 128), (3, 3), 128, (1, 1), 1),
((1, 6, 6, 128), (3, 3), 128, (2, 2), (1, 1, 0, 0)),
((1, 3, 3, 256), (3, 3), 256, (1, 1), 1),
((1, 25, 5, 64), (3, 3), 64, (1, 1), 1),
((1, 24, 24, 8), (5, 5), 8, (1, 1), 1),
((1, 24, 24, 8), (3, 5), 8, (1, 1), 1),
]
data_shape, kernel_size, num_filter, strides, padding, in_dtype = parameters(
*map(lambda t: t + ("int8",), in_dtype_parameterized_tests),
*map(lambda t: t + ("int16",), in_dtype_parameterized_tests),
((1, 48, 48, 6), (3, 3), 6, (1, 1), 1, "int16"),
)
dilation = parameter(1)
data_layout = parameter("NHWC")
kernel_layout = parameter("HWOI")
out_layout = parameter("NHWC")
schedule_name = parameter("depthwise_conv2d_nhwc_dsp.arm_cpu")
class TestDepthwiseConv2d_Tensordot(DepthwiseConv2dTests):
"""This test is for the depthwise_conv2d schedule tensorized using tensordot."""
data_shape, kernel_size, num_filter, strides, padding, in_dtype = parameters(
((1, 48, 48, 8), (3, 3), 8, (1, 1), 1, "int32"),
((1, 48, 48, 16), (3, 3), 16, (2, 2), (1, 1, 0, 0), "int32"),
((1, 24, 24, 32), (3, 3), 32, (1, 1), 1, "int32"),
((1, 24, 24, 32), (3, 3), 32, (2, 2), (1, 1, 0, 0), "int32"),
((1, 12, 12, 64), (3, 3), 64, (1, 1), 1, "int32"),
((1, 12, 12, 64), (3, 3), 64, (2, 2), (1, 1, 0, 0), "int32"),
((1, 6, 6, 128), (3, 3), 128, (1, 1), 1, "int32"),
((1, 6, 6, 128), (3, 3), 128, (2, 2), (1, 1, 0, 0), "int32"),
((1, 3, 3, 256), (3, 3), 256, (1, 1), 1, "int32"),
((1, 25, 5, 64), (3, 3), 64, (1, 1), 1, "int32"),
((1, 24, 24, 8), (5, 5), 8, (1, 1), 1, "int32"),
((1, 24, 24, 8), (3, 5), 8, (1, 1), 1, "int32"),
((1, 48, 48, 8), (3, 2), 8, 1, 0, "int16"),
((1, 48, 48, 8), (4, 4), 8, 1, 0, "int8"),
)
dilation = parameter(1)
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NHWC", "NCHW")
schedule_name = parameter("depthwise_conv2d_nchw_oihw_dsp.arm_cpu")
if __name__ == "__main__":
main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicConv2dTests:
@tvm.testing.requires_corstone300
def test_depthwise_conv2d_NCHWc(
self,
data_shape,
kernel_size,
data_layout,
kernel_layout,
groups,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single depthwise_conv2d_nchwc operator."""
ishape = data_shape
wshape = (data_shape[1], 1, *kernel_size)
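# Depthwise OIHW kernel shape: (in_channels, channel_multiplier=1, kernel_h, kernel_w).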
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
relay.layout_transform(input0, "NCHW", data_layout),
relay.layout_transform(weight0, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
groups=groups,
out_dtype="",
out_layout="",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
relay.layout_transform(input1, "NCHW", data_layout),
relay.layout_transform(weight1, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
groups=groups,
out_dtype="",
out_layout="",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)} |
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestDepthWiseConv2d_NCHWc(BasicConv2dTests):
"""This test is for depthwise_conv2d_NCHWc schedule."""
(
data_shape,
kernel_size,
groups,
strides,
padding,
dilation,
kernel_layout,
data_layout,
) = tvm.testing.parameters(
((1, 16, 32, 32), (3, 3), 16, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i8o", "NCHW8c"),
)
dtype = tvm.testing.parameter("int8", "int16", "int32")
schedule_name = tvm.testing.parameter("depthwise_conv2d_NCHWc")
if __name__ == "__main__":
tvm.testing.main()
"""Helper class for testing variations of 2D convolution. Should be used by subclassing
`GeneralizedConv2dTests`, and then setting the arguments using tvm.testing.parameter(s)."""
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
def _change_ndarray_layout(arr, src_layout, dst_layout):
"""Makes a copy of an ndarray, reshaping it to a new data layout.
Parameters
----------
arr : numpy.ndarray
The ndarray to be reformatted.
src_layout : str
The current layout of the Relay constant. Must be alphabetic (e.g. NHWC
or OIHW, but not NCHW2c).
dst_layout : str
The desired layout of the new Relay constant. Must be alphabetic (e.g. NHWC
or OIHW, but not NCHW2c).
Returns
-------
dst_array : numpy.ndarray
A copy of the ndarray with the new layout.
"""
assert src_layout.isalpha() and dst_layout.isalpha()
axis_order = [src_layout.index(c) for c in dst_layout]
return np.transpose(arr, axis_order)
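# For example (a quick sanity check, not part of the original tests):
#   _change_ndarray_layout(np.zeros((1, 2, 3, 4)), "NHWC", "NCHW").shape == (1, 4, 2, 3)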
class GeneralizedConv2dTests:
"""Superclass which can be used to test regular, depthwise, or grouped conv2D. Cannot be used
for 5D data formats (NCHWc and such) as written, but could be extended. Might also be worth
abstracting some of this logic into an even more general class that could be used for other
operators.
Note that data_shape should always be a tuple of length four indicating the data shape in NHWC
format (it will later be reshaped according to the given data_layout), and kernel_size should be
a length two tuple giving the height and width of the kernel.
This class (and the other base Conv2dTests classes) is not run directly by Pytest,
as their names do not start with `Test`."""
@tvm.testing.requires_corstone300
def test_conv2d(
self,
data_shape,
kernel_size,
num_filter,
in_dtype,
strides,
padding,
groups,
dilation,
data_layout,
kernel_layout,
out_layout,
schedule_name,
):
"""Test a subgraph with a single conv2d operator."""
ref_input_data = np.random.randint(low=-128, high=127, size=data_shape, dtype=in_dtype)
ref_input_var = relay.var("input", relay.TensorType(data_shape, in_dtype))
kernel_shape = (*kernel_size, data_shape[-1] // groups, num_filter)  # HWIO: (kh, kw, in_channels // groups, out_channels)
ref_kernel_data = np.random.randint(low=-10, high=10, size=kernel_shape, dtype=in_dtype)
"""Our x86 depthwise implementation only supports HWOI with NHWC, so we need to change our
kernel layout to work around this. We can't just change the whole thing to HWIO or
something else, as then group conv2d would not work. Eventually, we should switch to using
TensorFlow to create the reference output so we can ensure our implementation is right.
See https:
ref_relay_op = relay.op.nn.conv2d(
ref_input_var,
relay.const(_change_ndarray_layout(ref_kernel_data, "HWIO", self.ref_kernel_layout)),
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
dilation=(dilation, dilation),
data_layout="NHWC",
kernel_layout=self.ref_kernel_layout,
out_dtype="int32",
out_layout="NHWC",
)
ref_module = tvm.IRModule.from_expr(relay.Function([ref_input_var], ref_relay_op))
ref_outputs = generate_ref_data(ref_module, {"input": ref_input_data})
assert len(ref_outputs) == 1
output_tensor_name, output_tensor = next(iter(ref_outputs.items()))
ref_outputs[output_tensor_name] = _change_ndarray_layout(output_tensor, "NHWC", out_layout)
test_input_data = _change_ndarray_layout(ref_input_data, "NHWC", data_layout)
test_input_var = relay.var("input", relay.TensorType(test_input_data.shape, in_dtype))
test_kernel_data = _change_ndarray_layout(ref_kernel_data, "HWIO", kernel_layout)
test_relay_op = relay.op.nn.conv2d(
test_input_var,
relay.const(test_kernel_data),
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
dilation=(dilation, dilation),
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype="int32",
out_layout=out_layout,
)
test_function = relay.Function([test_input_var], test_relay_op)
test_model = AOTTestModel(
module=tvm.IRModule.from_expr(test_function),
inputs={"input": test_input_data},
outputs=ref_outputs,
)
compile_and_run(
test_model,
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
"""Tests for arm_cpu schedules for grouped conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import main, parameter, parameters
class GroupConv2dTests(GeneralizedConv2dTests):
"""Helper for constructing group Conv2ds. Sets the reference kernel layout to what x86 code
supports."""
def setup_method(self):
self.ref_kernel_layout = "HWIO"
class TestGroupConv2d_NCHW_OIHW(GroupConv2dTests):
"""This test is for group_conv2d_nchw.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1),
((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 32, 1, (1, 1, 2, 2), 2),
)
groups = parameter(2, 4)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NCHW")
schedule_name = parameter("group_conv2d_nchw.arm_cpu")
class TestGroupConv2d_NHWC_HWIO(GroupConv2dTests):
"""This test is for group_conv2d_nhwc.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1),
((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
((1, 49, 10, 16), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
groups = parameter(2, 4)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWIO")
out_layout = parameter("NHWC")
schedule_name = parameter("group_conv2d_nhwc.generic")
if __name__ == "__main__":
main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
class BasicPoolTests:
@tvm.testing.requires_corstone300
def test_pool(
self,
pool_type,
shape,
dtype,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
schedule_name,
):
"""Test a subgraph with a single max_pool operator."""
ishape = shape
input0 = relay.var("input", relay.TensorType(ishape, dtype))
out0 = getattr(relay.op.nn, pool_type)(
input0,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
out1 = getattr(relay.op.nn, pool_type)(
input1,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestMaxPool1d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
((3, 32, 27), (3,), (2,), 0, 1, "NCW", True),
((1, 32, 1), 3, 1, 0, 1, "NWC", False),
((1, 20, 4), 3, 2, 0, 1, "NWC", False),
)
pool_type = tvm.testing.parameter("max_pool1d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestMaxPool2d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False),
((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True),
((1, 26, 26, 12), (2, 2), (2, 2), 0, 1, "NHWC", False),
((1, 11, 11, 32), (2, 2), (2, 2), 0, 1, "NHWC", False),
((1, 3, 3, 64), (2, 2), (2, 2), 0, 1, "NHWC", False),
((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", False),
((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", False),
((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", True),
((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", True),
)
pool_type = tvm.testing.parameter("max_pool2d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestMaxPool3d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False),
)
pool_type = tvm.testing.parameter("max_pool3d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
if __name__ == "__main__":
tvm.testing.main()
import pytest
import tvm
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude, StaticTensorArrayOps
from tvm.relay.testing