import sys
import pytest
import numpy as np
import tvm
from tvm import autotvm, te, topi
import tvm.topi.testing
from tvm.contrib import cudnn
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.conv2d import _get_workload
from tvm.topi.x86.conv2d_avx_common import _fallback_schedule
import tvm.testing

dtype = tvm.testing.parameter("float16", "float32")
random_seed = tvm.testing.parameter(0)


@tvm.testing.fixture
def input_shape(batch, in_channel, in_size):
    return (batch, in_channel, in_size, in_size)


@tvm.testing.fixture
def weight_shape(num_filter, in_channel, kernel):
    return (num_filter, in_channel, kernel, kernel)


@tvm.testing.fixture
def bias_shape(num_filter):
    return (num_filter, 1, 1)


@tvm.testing.fixture(cache_return_value=True)
def ref_data(
    random_seed,
    input_shape,
    weight_shape,
    bias_shape,
    dtype,
    stride,
    padding,
    dilation,
    add_bias,
    apply_relu,
):
    np.random.seed(random_seed)

    conv_dtype = "float32" if dtype == "float16" else dtype

    a_np = np.random.uniform(size=input_shape).astype(dtype)
    w_np = np.random.uniform(size=weight_shape).astype(dtype)
    b_np = np.random.uniform(size=bias_shape).astype(dtype)
    dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
    c_np = tvm.topi.testing.conv2d_nchw_python(
        a_np.astype(conv_dtype), dw_np.astype(conv_dtype), stride, padding
    ).astype(dtype)

    if add_bias:
        c_np = c_np + b_np
    if apply_relu:
        c_np = np.maximum(c_np, 0)
    return a_np, w_np, b_np, c_np


class BaseConv2DTests:
    add_bias = tvm.testing.parameter(False)
    apply_relu = tvm.testing.parameter(False)
    dilation = tvm.testing.parameter(1)
    batch = tvm.testing.parameter(1)

    def test_conv2d_nchw(
        self,
        target,
        dev,
        batch,
        in_channel,
        in_size,
        num_filter,
        kernel,
        stride,
        padding,
        dtype,
        ref_data,
        dilation,
        add_bias,
        apply_relu,
    ):
        target = tvm.target.Target(target)
        is_cudnn_target = target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", [])

        if target.kind.name == "vulkan" and dtype == "float16":
            if not target.attrs.get("supports_float16", False) or not target.attrs.get(
                "supports_16bit_buffer", False
            ):
                pytest.xfail("Vulkan device does not support float16")

        if (
            target.kind.name == "cuda"
            and dtype == "float16"
            and not tvm.contrib.nvcc.have_fp16(dev.compute_version)
        ):
            pytest.xfail("CUDA float16 intrinsics not available")

        pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
        padding_sum = pad_top + pad_left + pad_bottom + pad_right
        has_asymmetric_padding = (pad_top != pad_bottom) or (pad_left != pad_right)
        if is_cudnn_target and has_asymmetric_padding:
            pytest.xfail("CuDNN does not support asymmetric padding")

        a_np, w_np, b_np, c_np = ref_data

        A = te.placeholder(a_np.shape, name="A", dtype=dtype)
        W = te.placeholder(w_np.shape, name="W", dtype=dtype)
        bias = te.placeholder(b_np.shape, name="bias", dtype=dtype)

        if "int" in dtype:
            tol = {"atol": 0, "rtol": 0}
        elif dtype == "float32":
            tol = {"rtol": 1e-4, "atol": 2e-4}
        elif dtype == "float16":
            # Allow up to half a ULP of error (at the output's magnitude) for each
            # of the in_channel * kernel * kernel values accumulated into an output.
            num_values_summed = in_channel * kernel * kernel
            gap_size = np.nextafter(c_np.max(), np.inf, dtype=c_np.dtype) - c_np.max()
            tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}

        with autotvm.tophub.context(target):  # load pre-tuned parameters from tophub
            if is_cudnn_target:
                fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn
            else:
                fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(target)

            with target:
                if is_cudnn_target:
                    C = fcompute(
                        A, W, (stride, stride), padding, (dilation, dilation), 1, "NCHW", dtype
                    )
                else:
                    C = fcompute(A, W, (stride, stride), padding, (dilation, dilation), dtype)
                if add_bias:
                    C = topi.add(C, bias)
                if apply_relu:
                    C = topi.nn.relu(C)
                s = fschedule([C])

            a = tvm.nd.array(a_np, dev)
            w = tvm.nd.array(w_np, dev)
            b = tvm.nd.array(b_np, dev)
            c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)

            func = tvm.build(
                s,
                [A, W, bias, C],
                target,
                name="conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
                    dtype,
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    kernel,
                    stride,
                    padding_sum,
                    dilation,
                ),
            )
            func(a, w, b, c)
            tvm.testing.assert_allclose(c.numpy(), c_np, **tol)

    @tvm.testing.parametrize_targets("llvm")
    def test_workload_padding(
        self,
        target,
        input_shape,
        weight_shape,
        stride,
        padding,
        dilation,
        dtype,
        ref_data,
    ):
        a_np, w_np, b_np, c_np = ref_data
        _, _, out_height, out_width = c_np.shape

        A = te.placeholder(input_shape, name="A", dtype=dtype)
        W = te.placeholder(weight_shape, name="W", dtype=dtype)

        with tvm.target.Target(target):
            wkl = _get_workload(A, W, (stride, stride), padding, dilation, dtype)

            # The fallback schedule should tile the output width exactly.
            cfg = autotvm.get_config()
            _fallback_schedule(cfg, wkl)
            ow_tile = np.prod(cfg["tile_ow"].size)

            tvm.testing.assert_allclose(ow_tile, out_width)
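
# The classes below bind BaseConv2DTests to concrete workloads.  Each tuple
# passed to tvm.testing.parameters() becomes one pytest parametrization of the
# listed fields, so every tuple runs the full set of tests defined above.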

class TestResNet18Workloads(BaseConv2DTests):
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (3, 224, 64, 7, 2, 3),
        (64, 56, 64, 3, 1, 1),
        (64, 56, 64, 1, 1, 0),
        (64, 56, 128, 3, 2, 1),
        (64, 56, 128, 1, 2, 0),
        (128, 28, 128, 3, 1, 1),
        (128, 28, 256, 3, 2, 1),
        (128, 28, 256, 1, 2, 0),
        (256, 14, 256, 3, 1, 1),
        (256, 14, 512, 3, 2, 1),
        (256, 14, 512, 1, 2, 0),
        (512, 7, 512, 3, 1, 1),
    )


class TestInceptionV3Workloads(BaseConv2DTests):
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (3, 299, 32, 3, 2, 0),
        (32, 149, 32, 3, 1, 0),
        (32, 147, 64, 3, 1, 1),
        (64, 73, 80, 1, 1, 0),
        (80, 73, 192, 3, 1, 0),
        (192, 35, 64, 1, 1, 0),
        (192, 35, 48, 1, 1, 0),
        (48, 35, 64, 5, 1, 2),
        (64, 35, 96, 3, 1, 1),
        (96, 35, 96, 3, 1, 1),
        (192, 35, 32, 1, 1, 0),
        (256, 35, 64, 1, 1, 0),
        (256, 35, 48, 1, 1, 0),
        (288, 35, 64, 1, 1, 0),
        (288, 35, 48, 1, 1, 0),
        (288, 35, 384, 3, 2, 0),
        (96, 35, 96, 3, 2, 0),
        (768, 17, 192, 1, 1, 0),
        (768, 17, 128, 1, 1, 0),
        (128, 17, 128, 1, 1, 0),
        (128, 17, 192, 7, 1, 3),
        (128, 17, 128, 7, 1, 3),
        (128, 17, 192, 1, 1, 0),
        (768, 17, 160, 1, 1, 0),
        (160, 17, 192, 7, 1, 3),
        (160, 17, 160, 7, 1, 3),
        (160, 17, 192, 1, 1, 0),
        (192, 17, 192, 1, 1, 0),
        (192, 17, 192, 7, 1, 3),
        (192, 17, 320, 3, 2, 0),
        (192, 17, 192, 3, 2, 0),
        (1280, 8, 320, 1, 1, 0),
        (1280, 8, 384, 1, 1, 0),
        (384, 8, 384, 1, 1, 0),
        (384, 8, 384, 3, 1, 1),
        (1280, 8, 448, 1, 1, 0),
        (448, 8, 384, 3, 1, 1),
        (1280, 8, 192, 1, 1, 0),
        (2048, 8, 320, 1, 1, 0),
        (2048, 8, 384, 1, 1, 0),
        (2048, 8, 448, 1, 1, 0),
        (2048, 8, 192, 1, 1, 0),
        (1024, 19, 84, 3, 1, 1),
        (2048, 10, 126, 3, 1, 1),
        (512, 5, 126, 3, 1, 1),
        (256, 3, 126, 3, 1, 1),
    )


class TestWeirdWorkloads(BaseConv2DTests):
    batch, in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (2, 2, 2, 2, 2, 2, 2),
        (3, 3, 3, 3, 3, 3, 3),
        (4, 4, 4, 4, 4, 4, 4),
        (5, 5, 5, 5, 5, 5, 5),
        (6, 6, 6, 6, 6, 6, 6),
    )


class TestAsymmetricPadding(BaseConv2DTests):
    dilation = tvm.testing.parameter(1, 2)
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (3, 35, 64, 7, 2, (0, 0, 1, 1)),
        (64, 8, 128, 3, 1, (3, 3, 2, 2)),
        (64, 8, 64, 1, 1, (1, 2, 2, 1)),
        (64, 17, 192, 1, 1, (1, 2)),
        (64, 8, 64, 3, 1, (3, 1)),
        (128, 8, 384, 3, 1, (0, 2)),
        (64, 35, 64, 3, 1, (1, 2)),
        (64, 8, 64, 1, 1, "VALID"),
        (388, 8, 64, 3, 1, "VALID"),
        (64, 10, 48, 3, 1, "VALID"),
        (512, 19, 64, 1, 1, "SAME"),
        (64, 5, 32, 2, 1, "SAME"),
        (64, 8, 64, 3, 1, "SAME"),
        (64, 8, 64, 3, 1, (1, 2, 2, 1)),
        (64, 8, 64, 5, 2, (1, 3)),
        (64, 8, 64, 3, 1, "VALID"),
        (64, 8, 64, 24, 1, "SAME"),
        (32, 35, 64, 7, 2, (0, 0, 2, 2)),
    )


class TestBatchSize(BaseConv2DTests):
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (64, 56, 64, 3, 1, 1),
    )
    batch = tvm.testing.parameter(1, 4, 9)


class TestBiasRelu(BaseConv2DTests):
    apply_relu = tvm.testing.parameter(True, False, ids=["relu", "no_relu"])
    add_bias = tvm.testing.parameter(True, False, ids=["bias", "no_bias"])
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (64, 56, 64, 3, 1, 1),
        (64, 8, 64, 3, 1, (1, 2, 2, 1)),
        (64, 8, 64, 5, 2, (1, 3)),
        (64, 8, 64, 3, 1, "VALID"),
        (64, 8, 64, 24, 1, "SAME"),
    )


if __name__ == "__main__":
    tvm.testing.main()
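
# Usage sketch (assumptions: pytest is available, TVM is built with the targets
# to exercise, and the file name is hypothetical; TVM_TEST_TARGETS is the
# environment variable tvm.testing consults when selecting targets):
#   TVM_TEST_TARGETS="llvm;cuda" pytest -v test_topi_conv2d_nchw.py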
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing

_conv2d_nhwc_implement = {
    "generic": (topi.nn.conv2d_nhwc, topi.generic.schedule_conv2d_nhwc),
    "gpu": (topi.gpu.conv2d_nhwc, topi.gpu.schedule_conv2d_nhwc),
    "cpu": (topi.nn.conv2d_nhwc, topi.x86.schedule_conv2d_nhwc),
    "arm_cpu": (
        topi.arm_cpu.conv2d_nhwc_spatial_pack,
        topi.arm_cpu.schedule_conv2d_nhwc_spatial_pack,
    ),
    "mali": (
        topi.mali.conv2d_nhwc_spatial_pack,
        topi.mali.schedule_conv2d_nhwc_spatial_pack,
    ),
    "bifrost": (
        topi.mali.conv2d_nhwc_spatial_pack,
        topi.mali.schedule_conv2d_nhwc_spatial_pack,
    ),
    "hls": (topi.nn.conv2d_nhwc, topi.hls.schedule_conv2d_nhwc),
}

dtype = tvm.testing.parameter("float32")

batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation = tvm.testing.parameters(
    (1, 256, 32, 256, 3, 1, "SAME", 1),
    (4, 128, 16, 128, 5, 2, "SAME", 1),
    (4, 128, 16, 256, 5, 2, "SAME", 1),
    (1, 256, 32, 256, 3, 1, "VALID", 1),
    (1, 256, 32, 256, 3, 1, "VALID", 1),
    (4, 128, 16, 128, 5, 2, "VALID", 1),
    (4, 128, 16, 256, 5, 2, "VALID", 1),
    (1, 128, 16, 256, 3, 2, (0, 0, 1, 1), 1),
    (1, 128, 16, 256, 3, 2, (1, 1, 2, 2), 1),
    (1, 128, 16, 128, 5, 2, (3, 3, 2, 2), 1),
    (1, 128, 16, 256, 3, 2, (0, 1, 2, 3), 1),
    (1, 256, 32, 256, 3, 1, "SAME", 2),
    (1, 256, 32, 256, 3, 1, (1, 1, 2, 2), 2),
)


@tvm.testing.fixture(cache_return_value=True)
def ref_data(dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation):
    in_height = in_width = in_size
    a_shape = (batch, in_height, in_width, in_channel)
    w_shape = (kernel, kernel, in_channel, num_filter)

    a_np = np.random.uniform(size=a_shape).astype(dtype)
    w_np = np.random.uniform(size=w_shape).astype(dtype)
    dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
    b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
    return a_np, w_np, b_np


def test_conv2d_nhwc_hwio(target, dev, ref_data, dtype, stride, padding, dilation):
    a_np, w_np, b_np = ref_data

    A = te.placeholder(a_np.shape, name="A", dtype=dtype)
    W = te.placeholder(w_np.shape, name="W", dtype=dtype)

    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_nhwc_implement)
        B = fcompute(A, W, stride, padding, dilation, dtype)
        s = fschedule([B])
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], target)
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)


def test_conv2d_nhwc_ohwi(ref_data, dtype, stride, padding, dilation):
    # This variant is only run on the LLVM CPU target.
    target = "llvm"
    dev = tvm.device(target, 0)
    a_np, w_np_hwio, b_np = ref_data
    w_np_ohwi = w_np_hwio.transpose(3, 0, 1, 2)  # HWIO -> OHWI

    A = te.placeholder(a_np.shape, name="A", dtype=dtype)
    W = te.placeholder(w_np_ohwi.shape, name="W", dtype=dtype)

    B = topi.nn.conv2d(
        A,
        W,
        stride,
        padding,
        dilation,
        data_layout="NHWC",
        kernel_layout="OHWI",
        out_dtype="float32",
    )
    s = tvm.te.create_schedule(B.op)
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np_ohwi, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], target)
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)


if __name__ == "__main__":
    tvm.testing.main()
"""Example code to do convolution."""
import pytest
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing


def verify_conv2d_1x1_nhwc_pack_int8(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1
):
    in_height = in_width = in_size

    A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="uint8")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    adtype = A.dtype
    wdtype = W.dtype

    @memoize("topi.tests.test_topi_conv2d_1x1_nhwc_pack_int8.verify_nhwc.v2")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(adtype)
        w_np = np.random.uniform(size=w_shape).astype(wdtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        return a_np, w_np, b_np

    a_np, w_np, b_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            B = topi.nn.conv2d(A, W, stride, padding, dilation, layout="NHWC", out_dtype="int32")
            s = topi.x86.schedule_conv2d_nhwc_pack_int8([B])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        func = tvm.build(s, [A, W, B], device)
        func(a, w, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)

    for device in ["llvm"]:
        check_device(device)


@pytest.mark.skip
def test_conv2d_nhwc():
    verify_conv2d_1x1_nhwc_pack_int8(1, 256, 32, 256, 1, 1, 0)


if __name__ == "__main__":
    pass
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing

_conv2d_nhwc_tensorcore_implement = {
    "cuda": (topi.cuda.conv2d_nhwc_tensorcore, topi.cuda.schedule_conv2d_nhwc_tensorcore)
}


def verify_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices="cuda",
):
    """Test the conv2d with tensorcore for nhwc layout"""
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )

    in_height = in_width = in_size

    A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    bias = te.placeholder((1, 1, 1, num_filter), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_nhwc.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        if add_bias:
            b_np = np.random.uniform(size=bias_shape).astype(dtype)
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        if not nvcc.have_tensorcore(dev.compute_version):
            print("skip because gpu does not support Tensor Cores")
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(
                device, _conv2d_nhwc_tensorcore_implement
            )
            C = fcompute(A, W, stride, padding, dilation, "float32")
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, c)

        rtol = 1e-3
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)

    check_device(devices)


@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_conv2d_nhwc_tensorcore():
    """Test the conv2d with tensorcore for nhwc layout"""
    verify_conv2d_nhwc(16, 16, 14, 16, 3, 1, 1)
    verify_conv2d_nhwc(16, 128, 7, 128, 7, 1, 3)
    verify_conv2d_nhwc(16, 160, 7, 160, 7, 1, 3)
    verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_bias=True)
    verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_relu=True)
    verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_relu=True, add_bias=True)
    verify_conv2d_nhwc(16, 64, 17, 64, 7, 1, (3, 3, 2, 2))
    verify_conv2d_nhwc(16, 64, 17, 64, 7, 1, "SAME")
    verify_conv2d_nhwc(16, 48, 35, 48, 5, 1, "VALID")
    verify_conv2d_nhwc(16, 48, 56, 48, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nhwc(16, 64, 28, 64, 3, 1, (1, 1, 1, 1))


if __name__ == "__main__":
    test_conv2d_nhwc_tensorcore()
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing

_conv2d_nhwc_winograd_tensorcore = {
    "cuda": (
        topi.cuda.conv2d_nhwc_winograd_tensorcore,
        topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore,
    )
}

_conv2d_nhwc_winograd_direct = {
    "cuda": (topi.cuda.conv2d_nhwc_winograd_direct, topi.cuda.schedule_conv2d_nhwc_winograd_direct)
}


def verify_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices="cuda",
    bgemm="direct",
):
    """Test the conv2d with winograd for nhwc layout"""
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )

    in_height = in_width = in_size

    A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    bias = te.placeholder((1, 1, 1, num_filter), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_nhwc_winograd.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        if add_bias:
            b_np = np.random.uniform(size=bias_shape).astype(dtype)
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            if bgemm == "direct":
                fcompute, fschedule = tvm.topi.testing.dispatch(
                    device, _conv2d_nhwc_winograd_direct
                )
            elif bgemm == "tensorcore":
                fcompute, fschedule = tvm.topi.testing.dispatch(
                    device, _conv2d_nhwc_winograd_tensorcore
                )
            C = fcompute(A, W, stride, padding, dilation, "float32")
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=2e-3)

    check_device(devices)


@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_conv2d_nhwc_winograd_direct():
    """Test the conv2d with winograd for nhwc layout"""
    print("test_winograd_direct...")
    verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1, bgemm="direct")
    verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
    verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
    verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
    verify_conv2d_nhwc(1, 48, 35, 64, 5, 1, 2)
    verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
    verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
    verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
    verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, "SAME")
    verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
    verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
    verify_conv2d_nhwc(1, 48, 35, 48, 5, 1, "VALID")


@tvm.testing.requires_cuda
@tvm.testing.requires_tensorcore
def test_conv2d_nhwc_winograd_tensorcore():
    """Test the conv2d with winograd for nhwc layout"""
    verify_conv2d_nhwc(8, 64, 56, 64, 3, 1, 1, bgemm="tensorcore")
    verify_conv2d_nhwc(8, 128, 28, 128, 3, 1, 1, bgemm="tensorcore")
    verify_conv2d_nhwc(8, 256, 14, 256, 3, 1, 1, bgemm="tensorcore")
    verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, (1, 1), add_relu=True, bgemm="tensorcore")
    verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, "SAME", add_relu=True, bgemm="tensorcore")


if __name__ == "__main__":
    test_conv2d_nhwc_winograd_direct()
    test_conv2d_nhwc_winograd_tensorcore()
"""Test code for transposed convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing

_conv2d_transpose_nchw_implement = {
    "generic": (topi.nn.conv2d_transpose_nchw, topi.generic.schedule_conv2d_transpose_nchw),
    "cpu": (topi.x86.conv2d_transpose_nchw, topi.x86.schedule_conv2d_transpose_nchw),
    "arm_cpu": (topi.arm_cpu.conv2d_transpose_nchw, topi.arm_cpu.schedule_conv2d_transpose_nchw),
    "gpu": (topi.cuda.conv2d_transpose_nchw, topi.cuda.schedule_conv2d_transpose_nchw),
    "hls": (topi.nn.conv2d_transpose_nchw, topi.hls.schedule_conv2d_transpose_nchw),
}


def verify_conv2d_transpose_nchw(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
    in_height, in_width = in_size
    kernel_height, kernel_width = kernel
    stride_height, stride_width = stride
    pad_top, pad_left, pad_bottom, pad_right = padding

    A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
    W = te.placeholder((in_channel, num_filter, kernel_height, kernel_width), name="W")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_transpose.verify_conv2d_transpose_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = tvm.topi.testing.conv2d_transpose_nchw_python(
            a_np, w_np, stride, padding, output_padding
        )
        c_np = np.maximum(b_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check(fcompute, fschedule, target, dev):
        B = fcompute(
            A,
            W,
            [stride_height, stride_width],
            [pad_top, pad_left, pad_bottom, pad_right],
            A.dtype,
            output_padding,
        )
        C = topi.nn.relu(B)
        s1 = fschedule([B])
        s2 = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)

        func1 = tvm.build(s1, [A, W, B], target)
        func2 = tvm.build(s2, [A, W, C], target)
        func1(a, w, b)
        func2(a, w, c)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    def check_generic(target, dev):
        print("Running generic on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = _conv2d_transpose_nchw_implement["generic"]
            check(fcompute, fschedule, target, dev)

    check_generic("llvm", tvm.cpu(0))

    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(
                target, _conv2d_transpose_nchw_implement
            )
            check(fcompute, fschedule, target, dev)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)


@tvm.testing.uses_gpu
def test_conv2d_transpose_nchw():
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 1, (1, 1), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (3, 3), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (1, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (2, 2), (2, 2), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (2, 2), (2, 2), (0, 0, 0, 0), (1, 1))
    verify_conv2d_transpose_nchw(1, 32, (32, 32), 128, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 32, (32, 32), 128, (5, 5), (2, 2), (1, 1, 1, 1), (0, 0))
    verify_conv2d_transpose_nchw(16, 32, (8192, 1), 8, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0))
    verify_conv2d_transpose_nchw(16, 512, (8, 1), 128, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0))
    verify_conv2d_transpose_nchw(16, 512, (8, 1), 128, (31, 1), (2, 1), (14, 0, 15, 0), (1, 0))


if __name__ == "__main__":
    test_conv2d_transpose_nchw()
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing

_conv2d_nchw_winograd_implement = {
    "arm_cpu": (topi.arm_cpu.conv2d_nchw_winograd, topi.arm_cpu.schedule_conv2d_nchw_winograd),
    "cuda": (topi.cuda.conv2d_nchw_winograd, topi.cuda.schedule_conv2d_nchw_winograd),
    "mali": (topi.mali.conv2d_nchw_winograd, topi.mali.schedule_conv2d_nchw_winograd),
}


def verify_conv2d_nchw(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices=["cuda", "llvm -device=arm_cpu", "opencl -device=mali"],
):
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )

    in_height = in_width = in_size

    A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
    W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
    bias = te.placeholder((num_filter, 1, 1), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_winograd.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
        if add_bias:
            b_np = np.random.uniform(size=bias_shape).astype(dtype)
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
            C = fcompute(A, W, stride, padding, dilation, dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, c)

        rtol = 1e-3
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)

    for device in devices:
        check_device(device)


@tvm.testing.uses_gpu
def test_conv2d_nchw():
    verify_conv2d_nchw(1, 128, 17, 192, 7, 1, 3, devices=["cuda"])
    verify_conv2d_nchw(1, 128, 17, 128, 7, 1, 3, devices=["cuda"])
    verify_conv2d_nchw(1, 160, 17, 160, 7, 1, 3, devices=["cuda"])
    verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1)
    verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1)
    verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1)
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_bias=True)
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True)
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True, add_bias=True)
    verify_conv2d_nchw(1, 1, 1, 1, 3, 1, 1)
    verify_conv2d_nchw(3, 3, 3, 3, 3, 1, 1)
    verify_conv2d_nchw(2, 13, 71, 59, 3, 1, 1)
    verify_conv2d_nchw(1, 48, 35, 64, 5, 1, 2, devices=["cuda"])
    verify_conv2d_nchw(1, 48, 56, 48, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nchw(1, 64, 28, 64, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nchw(1, 128, 14, 128, 3, 1, (1, 1))
    verify_conv2d_nchw(1, 512, 7, 512, 3, 1, "SAME")
    verify_conv2d_nchw(2, 13, 71, 59, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nchw(2, 48, 56, 48, 3, 1, (1, 1, 1, 1), add_bias=True)
    verify_conv2d_nchw(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
    verify_conv2d_nchw(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
    verify_conv2d_nchw(1, 64, 17, 192, 7, 1, (3, 1), devices=["cuda"])
    verify_conv2d_nchw(1, 64, 17, 64, 7, 1, (3, 3, 2, 2), devices=["cuda"])
    verify_conv2d_nchw(1, 160, 17, 160, 7, 1, "SAME", devices=["cuda"])
    verify_conv2d_nchw(1, 48, 35, 48, 5, 1, "VALID", devices=["cuda"])


def verify_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
):
    A = te.placeholder((batch, in_size, in_size, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    bias = te.placeholder((1, 1, 1, num_filter), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_winograd.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    target = "llvm"
    dev = tvm.device(target)

    C = topi.nn.conv2d_winograd_nhwc(A, W, stride, padding, dilation, dtype)
    s = te.create_schedule([C.op])

    a = tvm.nd.array(a_np, device=dev)
    w = tvm.nd.array(w_np, device=dev)
    b = tvm.nd.array(b_np, device=dev)
    c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), device=dev)
    func = tvm.build(s, [A, W, C], target=target)
    func(a, w, c)

    rtol = 1e-3
    tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)


def test_conv2d_nhwc():
    verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
    verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
    verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
    verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
    verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
    verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
    verify_conv2d_nhwc(1, 3, 7, 3, 3, 1, "SAME")
    verify_conv2d_nhwc(1, 48, 35, 48, 3, 1, "VALID")


if __name__ == "__main__":
    test_conv2d_nchw()
    test_conv2d_nhwc()
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing