import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_depth_to_space(
block_size, batch, in_channel, in_height, in_width, layout="NCHW", mode="DCR"
):
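# depth_to_space moves data from the channel axis into spatial blocks: the channel
# count shrinks by block_size**2 while height and width grow by block_size.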
out_channel = int(in_channel / (block_size * block_size))
out_height = int(in_height * block_size)
out_width = int(in_width * block_size)
if layout == "NCHW":
in_shape = [batch, in_channel, in_height, in_width]
out_shape = [batch, out_channel, out_height, out_width]
elif layout == "NHWC":
in_shape = [batch, in_height, in_width, in_channel]
out_shape = [batch, out_height, out_width, out_channel]
else:
raise NotImplementedError("Layout not supported {}".format(layout))
A = te.placeholder(in_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=in_shape).astype(dtype)
B = topi.nn.depth_to_space(A, block_size=block_size, layout=layout, mode=mode)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
b_np = tvm.topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
def check_device(device, dev):
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for device, dev in tvm.testing.enabled_targets():
check_device(device, dev)
@tvm.testing.uses_gpu
def test_depth_to_space():
for layout in ["NCHW", "NHWC"]:
for mode in ["DCR", "CDR"]:
verify_depth_to_space(2, 1, 4, 1, 1, layout=layout, mode=mode)
verify_depth_to_space(2, 1, 32, 32, 32, layout=layout, mode=mode)
verify_depth_to_space(8, 1, 256, 32, 32, layout=layout, mode=mode)
verify_depth_to_space(4, 8, 32, 32, 32, layout=layout, mode=mode)
verify_depth_to_space(4, 8, 32, 128, 128, layout=layout, mode=mode)
if __name__ == "__main__":
test_depth_to_space()
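# For reference, a minimal NumPy sketch of the "DCR" mode transform for an NCHW
# tensor (following the ONNX DepthToSpace DCR definition). The helper below is
# illustrative only and is not used by the test above, which compares against
# tvm.topi.testing.depth_to_space_python.
def _depth_to_space_dcr_nchw_sketch(x, block_size):
    n, c, h, w = x.shape
    out_c = c // (block_size * block_size)
    # Split the channel axis into (block, block, out_c), then interleave the two
    # block axes with height and width.
    tmp = x.reshape(n, block_size, block_size, out_c, h, w)
    tmp = tmp.transpose(0, 3, 4, 1, 5, 2)
    return tmp.reshape(n, out_c, h * block_size, w * block_size)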
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.depthwise_conv2d import _get_workload
from tvm.topi.x86.depthwise_conv2d import _fallback_schedule
from tvm.topi.generic import conv2d as conv2d_generic
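# Maps layout -> target kind -> list of (compute, schedule) pairs; the test picks
# the list for the current target via tvm.topi.testing.dispatch() below.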
_depthwise_conv2d_implement = {
"NCHW": {
"generic": [(topi.nn.depthwise_conv2d_nchw, topi.generic.schedule_depthwise_conv2d_nchw)],
"arm_cpu": [
(topi.arm_cpu.depthwise_conv2d_nchw, topi.arm_cpu.schedule_depthwise_conv2d_nchw),
(
topi.arm_cpu.depthwise_conv2d_nchw_spatial_pack,
topi.arm_cpu.schedule_depthwise_conv2d_nchw_spatial_pack,
),
],
"gpu": [(topi.cuda.depthwise_conv2d_nchw, topi.cuda.schedule_depthwise_conv2d_nchw)],
"mali": [(topi.mali.depthwise_conv2d_nchw, topi.mali.schedule_depthwise_conv2d_nchw)],
"bifrost": [(topi.nn.depthwise_conv2d_nchw, topi.bifrost.schedule_depthwise_conv2d_nchw)],
"intel_graphics": [
(
topi.intel_graphics.depthwise_conv2d_nchw,
topi.intel_graphics.schedule_depthwise_conv2d_nchw,
)
],
},
"NHWC": {
"generic": [
(topi.nn.depthwise_conv2d_nhwc, topi.generic.schedule_depthwise_conv2d_nhwc),
(topi.nn.depthwise_conv2d_nhwc, conv2d_generic.schedule_depthwise_conv2d_nhwc),
],
"arm_cpu": [
(
topi.arm_cpu.compute_depthwise_conv2d_nhwc,
topi.arm_cpu.schedule_depthwise_conv2d_nhwc,
)
],
"gpu": [(topi.nn.depthwise_conv2d_nhwc, topi.cuda.schedule_depthwise_conv2d_nhwc)],
"mali": [(topi.mali.depthwise_conv2d_nhwc, topi.mali.schedule_depthwise_conv2d_nhwc)],
"bifrost": [(topi.mali.depthwise_conv2d_nhwc, topi.mali.schedule_depthwise_conv2d_nhwc)],
},
"NCHWc": {
"generic": [(topi.x86.depthwise_conv2d_NCHWc, topi.x86.schedule_depthwise_conv2d_NCHWc)],
},
}
random_seed = tvm.testing.parameter(0)
in_dtype, out_dtype = tvm.testing.parameters(
("float32", "float32"),
("float16", "float16"),
)
@tvm.testing.fixture
def input_shape(layout, batch, in_channel, in_size, filter_shape):
if layout == "NCHW":
return (batch, in_channel, in_size, in_size)
elif layout == "NHWC":
return (batch, in_size, in_size, in_channel)
elif layout == "NCHWc":
oc_block = filter_shape[-1]
ic_block = next(bn for bn in range(oc_block, 0, -1) if in_channel % bn == 0)
return (batch, in_channel // ic_block, in_size, in_size, ic_block)
@tvm.testing.fixture
def filter_shape(layout, in_channel, channel_multiplier, kernel):
filter_channel = in_channel
if layout == "NCHW":
return (filter_channel, channel_multiplier, kernel, kernel)
elif layout == "NHWC":
return (kernel, kernel, filter_channel, channel_multiplier)
elif layout == "NCHWc":
out_channel = in_channel * channel_multiplier
oc_block = next(bn for bn in range(16, 0, -1) if out_channel % bn == 0)
return (out_channel // oc_block, 1, kernel, kernel, 1, oc_block)
@tvm.testing.fixture
def scale_shape(layout, in_channel, channel_multiplier, filter_shape):
out_channel = in_channel * channel_multiplier
if layout in ("NCHW", "NHWC"):
return (out_channel,)
if layout == "NCHWc":
oc_block = filter_shape[-1]
return (out_channel // oc_block, oc_block)
raise ValueError("Unknown layout {}".format(layout))
@tvm.testing.fixture
def shift_shape(scale_shape):
return scale_shape
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
random_seed,
in_dtype,
out_dtype,
layout,
input_shape,
filter_shape,
dilation,
stride,
padding,
scale_shape,
shift_shape,
use_scale_shift,
apply_relu,
):
np.random.seed(random_seed)
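# float16 inputs are accumulated in float32 for the reference result and cast back
# to out_dtype afterwards.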
conv_dtype = "float32" if in_dtype == "float16" else in_dtype
input_np = np.random.uniform(size=input_shape).astype(in_dtype)
filter_np = np.random.uniform(size=filter_shape).astype(in_dtype)
scale_np = np.random.uniform(size=scale_shape).astype(out_dtype)
shift_np = np.random.uniform(size=shift_shape).astype(out_dtype)
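# Per layout: the NumPy reference implementation, the dilation tuple matching the
# filter layout, and the broadcast shape used to apply the per-channel scale/shift.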
if layout == "NCHW":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchw
dilation = (1, 1, dilation, dilation)
reshape = (1, -1, 1, 1)
elif layout == "NHWC":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nhwc
dilation = (dilation, dilation, 1, 1)
reshape = (1, 1, 1, -1)
elif layout == "NCHWc":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchwc
dilation = (1, 1, dilation, dilation, 1, 1)
reshape = (1, scale_shape[0], 1, 1, scale_shape[1])
dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, dilation)
output_np = np_depthwise_conv2d(
input_np.astype(conv_dtype), dilated_filter_np.astype(conv_dtype), stride, padding
).astype(out_dtype)
if use_scale_shift:
output_np = output_np * scale_np.reshape(reshape) + shift_np.reshape(reshape)
if apply_relu:
output_np = np.maximum(output_np, 0)
return (
input_np,
filter_np,
scale_np,
shift_np,
output_np,
)
class BaseDepthwiseConv2D:
"""Provides the test_conv2d test function, to be used by other test classes.
Test parameter sets are split out into different classes for
readability (e.g. used for mobilenet), and for restrictions
(e.g. implemented only for llvm).
"""
layout = tvm.testing.parameter("NCHW", "NHWC")
(batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
(1, 728, 32, 1, 3, 1),
(4, 256, 64, 2, 5, 2),
)
padding = tvm.testing.parameter("SAME", "VALID")
dilation = tvm.testing.parameter(1, 2)
use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
apply_relu = tvm.testing.parameter(True, False, ids=["with_relu", "no_relu"])
run_after_compile = True
def test_conv2d(
self,
target,
dev,
in_dtype,
out_dtype,
layout,
input_shape,
filter_shape,
scale_shape,
shift_shape,
use_scale_shift,
apply_relu,
batch,
in_channel,
channel_multiplier,
kernel,
stride,
padding,
dilation,
ref_data,
):
target = tvm.target.Target(target)
if (
target.kind.name == "cuda"
and in_dtype == "float16"
and not tvm.contrib.nvcc.have_fp16(dev.compute_version)
):
pytest.xfail("CUDA float16 intrinsics not available")
if (
target.kind.name == "vulkan"
and in_dtype == "float16"
and (
not target.attrs.get("supports_float16", False)
or not target.attrs.get("supports_16bit_buffer", False)
)
):
pytest.xfail("Vulkan float16 driver support not available")
if dilation == 1:
padding_args = get_pad_tuple(padding, (kernel, kernel))
padding_args_i = [0, 1, 2, 3] if layout == "NCHW" else [0, 1]
padding_args = [padding_args[i] for i in padding_args_i]
else:
padding_args = padding
Input = te.placeholder(input_shape, name="Input", dtype=in_dtype)
Filter = te.placeholder(filter_shape, name="Filter", dtype=in_dtype)
Scale = te.placeholder(scale_shape, name="Scale", dtype=out_dtype)
Shift = te.placeholder(shift_shape, name="Shift", dtype=out_dtype)
if layout == "NCHW":
topi_scale_shift = topi.nn.scale_shift_nchw
fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype)
elif layout == "NHWC":
topi_scale_shift = topi.nn.scale_shift_nhwc
fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype)
elif layout == "NCHWc":
topi_scale_shift = topi.nn.scale_shift_nchwc
in_layout = "NCHW{}c".format(input_shape[-1])
out_layout = "NCHW{}c".format(filter_shape[-1])
fcompute_args = (
Input,
Filter,
stride,
padding,
dilation,
in_layout,
out_layout,
out_dtype,
)
with autotvm.tophub.context(target):
impl_list = tvm.topi.testing.dispatch(target, _depthwise_conv2d_implement[layout])[:]
if target == "llvm" and layout == "NCHW" and channel_multiplier == 1 and dilation == 1:
impl_list.append(
(topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw)
)
for fcompute, fschedule in impl_list:
with tvm.target.Target(target):
C = fcompute(*fcompute_args)
if use_scale_shift:
C = topi_scale_shift(C, Scale, Shift)
if apply_relu:
C = topi.nn.relu(C)
s = fschedule(C)
f = tvm.build(s, [Input, Filter, Scale, Shift, C], target)
if self.run_after_compile:
input_np, filter_np, scale_np, shift_np, output_np = ref_data
if "int" in out_dtype:
tol = {"atol": 0, "rtol": 0}
elif out_dtype == "float32":
tol = {"rtol": 1e-4, "atol": 1e-5}
elif out_dtype == "float16":
num_values_summed = kernel * kernel
gap_size = (
np.nextafter(output_np.max(), np.inf, dtype=output_np.dtype)
- output_np.max()
)
tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
output_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(C.shape), dtype=C.dtype),
dev,
)
f(input_tvm, filter_tvm, scale_tvm, shift_tvm, output_tvm)
tvm.testing.assert_allclose(output_np, output_tvm.numpy(), **tol)
class TestDepthwiseConv2D(BaseDepthwiseConv2D):
"""Test variety of parameters, defined in BaseDepthwiseConv2D. Also
has llvm-specific tests for workload padding."""
@tvm.testing.parametrize_targets("llvm")
def test_workload_padding(
self,
out_dtype,
layout,
input_shape,
filter_shape,
target,
ref_data,
stride,
padding,
dilation,
):
input_np, filter_np, scale_np, shift_np, output_np = ref_data
if layout == "NCHW":
_, _, out_height, out_width = output_np.shape
elif layout == "NHWC":
_, out_height, out_width, _ = output_np.shape
elif layout == "NCHWc":
_, _, out_height, out_width, _ = output_np.shape
Input = te.placeholder(input_shape, name="Input")
Filter = te.placeholder(filter_shape, name="Filter")
wkl = _get_workload(Input, Filter, (stride, stride), padding, dilation, out_dtype, layout)
with tvm.target.Target(target):
cfg = autotvm.get_config()
_fallback_schedule(cfg, wkl)
ow_tile = np.prod(cfg["tile_ow"].size)
tvm.testing.assert_allclose(ow_tile, out_width)
class TestDepthwiseConv2D_MobilenetWorkloads(BaseDepthwiseConv2D):
"""Extra tests to verify functionality for workloads used by mobilenet."""
layout = tvm.testing.parameter("NCHW")
batch = tvm.testing.parameter(1)
channel_multiplier = tvm.testing.parameter(1)
kernel = tvm.testing.parameter(3)
padding = tvm.testing.parameter("SAME")
dilation = tvm.testing.parameter(1)
in_channel, in_size, stride = tvm.testing.parameters(
(32, 112, 1),
(64, 112, 2),
(128, 56, 1),
(128, 56, 2),
(256, 28, 1),
(256, 28, 2),
(512, 14, 1),
(512, 14, 2),
(1024, 7, 1),
)
@tvm.testing.parametrize_targets("llvm") |
class TestDepthwiseConv2D_NCHWc(BaseDepthwiseConv2D):
"""Tests specific to NCHWc layouts.
Once the implementation supports channel_multiplier>1 and GPU
devices, this class can be merged into TestDepthwiseConv2D.
"""
layout = tvm.testing.parameter("NCHWc")
(batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
(1, 728, 32, 1, 3, 1),
)
@tvm.testing.parametrize_targets("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu") |
class TestDepthwiseConv2DArmCompile(BaseDepthwiseConv2D):
"""Compile-only tests for cross-compiling to ARM."""
layout = tvm.testing.parameter("NHWC", "NCHW")
batch = tvm.testing.parameter(1)
dilation = tvm.testing.parameter(1)
in_dtype, out_dtype = tvm.testing.parameters(("int16", "int32"))
in_channel = tvm.testing.parameter(728)
in_size = tvm.testing.parameter(32)
kernel = tvm.testing.parameter(1)
channel_multiplier = tvm.testing.parameter(1, 3)
stride = tvm.testing.parameter(1)
padding = tvm.testing.parameter("SAME")
use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
run_after_compile = False
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
from tvm import topi
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
import tvm.topi.testing
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_input(
batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
in_w = in_h
filter_channel = in_channel
filter_w = filter_h
stride_w = stride_h
padding_w = padding_h
out_h = int((in_h + 2 * padding_h - filter_h) / stride_h + 1)
out_w = int((in_w + 2 * padding_w - filter_w) / stride_w + 1)
out_channel = in_channel * channel_multiplier
ishape = [batch, in_h, in_w, in_channel]
oshape = [batch, out_h, out_w, out_channel]
Out_grad = te.placeholder(oshape, name="Out_grad")
Filter = te.placeholder((filter_h, filter_w, filter_channel, channel_multiplier))
In_grad = topi.nn.depthwise_conv2d_backward_input_nhwc(
Filter,
Out_grad,
oshape,
ishape,
stride=[stride_h, stride_w],
padding=[padding_h, padding_w],
)
schedule = schedule_depthwise_conv2d_backward_input_nhwc(In_grad)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
f = tvm.build(schedule, [Filter, Out_grad, In_grad], device)
dtype = Out_grad.dtype
out_grad_shape = get_const_tuple(Out_grad.shape)
filter_shape = get_const_tuple(Filter.shape)
@memoize("topi.tests.test_topi_depthwise_conv2d_backward_input.nhwc")
def get_ref_data():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
dilated_out_grad_np = tvm.topi.testing.dilate_python(
out_grad_np, [1, stride_h, stride_w, 1]
)
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(
[padding_h, padding_w], (filter_h, filter_w)
)
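# Padding for the gradient ("transposed") convolution: mirror the forward pads
# around filter - 1 and add stride - 1 on the bottom/right to account for the
# zeros inserted when the output gradient is dilated.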
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = (filter_h - 1 - fpad_bottom) + (stride_h - 1)
bpad_left = filter_w - 1 - fpad_left
bpad_right = (filter_w - 1 - fpad_right) + (stride_w - 1)
padded_out_grad = np.zeros(
(
batch,
dilated_out_grad_np.shape[1] + bpad_top + bpad_bottom,
dilated_out_grad_np.shape[2] + bpad_left + bpad_right,
out_channel,
)
)
padded_out_grad[
:,
bpad_top : dilated_out_grad_np.shape[1] + bpad_top,
bpad_left : dilated_out_grad_np.shape[2] + bpad_left,
:,
] = dilated_out_grad_np
in_grad_np = np.zeros((batch, in_h, in_w, in_channel))
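# Reference input gradient: signal.convolve2d flips the filter, which is exactly
# what back-propagating through the forward correlation requires; contributions
# are summed over the channel multiplier.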
for b in range(batch):
for c in range(in_channel):
for m in range(channel_multiplier):
in_grad_np[b, :, :, c] += signal.convolve2d(
padded_out_grad[b, :, :, c * channel_multiplier + m],
filter_np[:, :, c, m],
mode="valid",
)[0:in_h, 0:in_w]
return (out_grad_np, filter_np, in_grad_np)
(out_grad_np, filter_np, in_grad_np) = get_ref_data()
out_grad_tvm = tvm.nd.array(out_grad_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
in_grad_tvm = tvm.nd.array(np.zeros(shape=ishape, dtype=dtype), dev)
timer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = timer(filter_tvm, out_grad_tvm, in_grad_tvm).mean
tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.numpy(), rtol=1e-5)
check_device("opencl")
check_device("cuda")
check_device("metal")
check_device("rocm")
check_device("vulkan")
check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_input_nhwc():
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 1, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 1, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 1, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 1, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 2, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 2, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 2, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 2, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 2, 0)
if __name__ == "__main__":
test_topi_depthwise_conv2d_backward_input_nhwc()
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_weight(
batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
in_w = in_h
filter_channel = in_channel
filter_w = filter_h
stride_w = stride_h
padding_w = padding_h
out_h = int((in_h + 2 * padding_h - filter_h) / stride_h + 1)
out_w = int((in_w + 2 * padding_w - filter_w) / stride_w + 1)
out_channel = in_channel * channel_multiplier
oshape = [batch, out_h, out_w, out_channel]
fshape = [filter_h, filter_w, in_channel, channel_multiplier]
Out_grad = te.placeholder(oshape, name="Out_grad")
Input = te.placeholder((batch, in_h, in_w, in_channel), name="In_grad")
Weight_grad = topi.nn.depthwise_conv2d_backward_weight_nhwc(
Input, Out_grad, oshape, fshape, stride=[stride_h, stride_w], padding=[padding_h, padding_w]
)
schedule = schedule_depthwise_conv2d_backward_weight_nhwc(Weight_grad)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
f = tvm.build(schedule, [Input, Out_grad, Weight_grad], device)
dtype = Out_grad.dtype
out_grad_shape = get_const_tuple(Out_grad.shape)
in_shape = get_const_tuple(Input.shape)
@memoize("topi.tests.test_topi_depthwise_conv2d_backward_weight.nhwc")
def get_ref_data():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
input_np = np.random.uniform(size=in_shape).astype(dtype)
dilated_out_grad_np = tvm.topi.testing.dilate_python(
out_grad_np, [1, stride_h, stride_w, 1]
)
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(
[padding_h, padding_w], (filter_h, filter_w)
)
padded_input_np = np.zeros(
(batch, in_h + pad_top + pad_bottom, in_w + pad_left + pad_right, in_channel)
)
padded_input_np[:, pad_top : in_h + pad_top, pad_left : in_w + pad_left, :] = input_np
weight_grad_np = np.zeros((filter_h, filter_w, in_channel, channel_multiplier))
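# Reference weight gradient: rotating the dilated output gradient by 180 degrees
# turns signal.convolve2d into a cross-correlation with the padded input, summed
# over the batch.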
for c in range(in_channel):
for m in range(channel_multiplier):
for b in range(batch):
weight_grad_np[:, :, c, m] += signal.convolve2d(
padded_input_np[b, :, :, c],
np.rot90(
dilated_out_grad_np[
b, :, :, c * channel_multiplier + m % channel_multiplier
],
2,
),
mode="valid",
)[0:filter_h, 0:filter_w]
return (out_grad_np, input_np, weight_grad_np)
(out_grad_np, input_np, weight_grad_np) = get_ref_data()
out_grad_tvm = tvm.nd.array(out_grad_np, dev)
input_tvm = tvm.nd.array(input_np, dev)
weight_grad_tvm = tvm.nd.array(np.zeros(shape=fshape, dtype=dtype), dev)
timer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = timer(input_tvm, out_grad_tvm, weight_grad_tvm).mean
tvm.testing.assert_allclose(weight_grad_np, weight_grad_tvm.numpy(), rtol=1e-4)
check_device("opencl")
check_device("cuda")
check_device("metal")
check_device("rocm")
check_device("vulkan")
check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_weight_nhwc():
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 1, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 1, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 1, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 1, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 2, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 2, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 2, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 2, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 2, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 2, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 2, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 2, 0)
if __name__ == "__main__":
test_topi_depthwise_conv2d_backward_weight_nhwc()
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import numpy as np
def test_dilate():
target = "llvm"
dev = tvm.cpu(0)
def _test_dilate(input_size, strides, dilation_value=None):
Input = te.placeholder((input_size))
if dilation_value is None:
Output = topi.nn.dilate(Input, strides)
else:
Output = topi.nn.dilate(Input, strides, dilation_value)
schedule = te.create_schedule(Output.op)
input_np = np.random.uniform(size=input_size).astype(Input.dtype)
if dilation_value is None:
output_np = tvm.topi.testing.dilate_python(input_np, strides)
else:
output_np = tvm.topi.testing.dilate_python(input_np, strides, dilation_value)
input_tvm = tvm.nd.array(input_np, device=dev)
output_size = topi.utils.get_const_tuple(Output.shape)
output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), device=dev)
f = tvm.build(schedule, [Input, Output], target)
f(input_tvm, output_tvm)
tvm.testing.assert_allclose(output_tvm.numpy(), output_np, rtol=1e-5)
_test_dilate((32,), (2,))
_test_dilate((32, 32), (2, 2))
_test_dilate((1, 3, 32, 32), (1, 1, 1, 1))
_test_dilate((1, 3, 32, 32), (2, 2, 2, 2))
_test_dilate((1, 32, 32, 3, 3), (1, 1, 1, 1, 1))
_test_dilate((1, 32, 32, 3, 3), (2, 2, 2, 2, 2))
_test_dilate((1, 32, 32, 32, 3, 3), (1, 1, 1, 2, 2, 2))
_test_dilate((1, 32, 32, 32, 3, 3), (2, 2, 2, 1, 1, 1))
_test_dilate((1, 32, 32, 32, 3, 3), (2, 2, 2, 1, 1, 1), 1.0)
if __name__ == "__main__":
test_dilate()
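# For intuition: dilation with strides (s0, s1, ...) places the input values s
# elements apart along each axis and fills the gaps with dilation_value (0 by
# default), so a length-n axis becomes length (n - 1) * s + 1. A minimal NumPy
# sketch of that behaviour, illustrative only (the test above compares against
# tvm.topi.testing.dilate_python):
def _dilate_sketch(x, strides, value=0.0):
    out_shape = tuple((n - 1) * s + 1 for n, s in zip(x.shape, strides))
    out = np.full(out_shape, value, dtype=x.dtype)
    out[tuple(slice(None, None, s) for s in strides)] = x
    return out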
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
def with_tvm(lam, *args):
"""Take numpy arrays as args, convert them to TVM tensors and call `lam`.
Result of lambda is converted back to numpy array and returned.
"""
dev = tvm.cpu(0)
pls = []
vals_nd = []
for i, arg in enumerate(args):
pls.append(te.placeholder(arg.shape, name="pl" + str(i)))
vals_nd.append(tvm.nd.array(arg, dev))
out = lam(*pls)
out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
s = te.create_schedule([out.op])
m = tvm.build(s, pls + [out], "llvm")
m(*(vals_nd + [out_nd]))
return out_nd.numpy()
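# Example usage: with_tvm(lambda A, B: topi.add(A, B), x_np, y_np) builds an llvm
# kernel for the lambda and returns x_np + y_np as a NumPy array.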
def verify_einsum(subscripts, shapes):
ops = []
for shape in shapes:
tmp = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(np.float32)
ops.append(tmp)
c1 = np.einsum(subscripts, *ops)
if len(ops) == 1:
c2 = with_tvm(lambda A: topi.einsum(subscripts, A), *ops)
elif len(ops) == 2:
c2 = with_tvm(lambda A, B: topi.einsum(subscripts, A, B), *ops)
elif len(ops) == 3:
c2 = with_tvm(lambda A, B, C: topi.einsum(subscripts, A, B, C), *ops)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize(
"equation,inputs",
[
("ii", [(5, 5)]),
("ii->i", [(5, 5)]),
("ij->i", [(5, 5)]),
("...j->...", [(5, 5)]),
("...j, j", [(5, 5), (5,)]),
("..., ...", [(), (2, 3)]),
("ijk, jil->kl", [(3, 4, 5), (4, 3, 2)]),
("ij, ij -> i", [(1, 4), (2, 4)]),
("...ij, ...jk -> ...ik", [(1, 4), (4, 2)]),
("...ij, ...ik -> ...jk", [(1, 1, 1, 4), (1, 1, 1, 3)]),
("...ik, ...jk, ...hk -> i...jh", [(3, 4, 4), (1, 5, 3, 8, 4), (2, 5, 3, 6, 4)]),
("ij,jk->ik", [(2, 3), (3, 4)]),
("ij,jk,km->im", [(2, 3), (3, 4), (4, 5)]),
],
)
def test_einsum(equation, inputs):
verify_einsum(equation, inputs)
if __name__ == "__main__":
tvm.testing.main()
"""Example code to do group convolution.""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm.autotvm.task.space |
import FallbackConfigEntity
from tvm |
import topi |
import tvm.topi.testing
from tvm.contrib.pickle_memoize |
import memoize
from tvm.topi.utils |
import get_const_tuple
from common |
import Int8Fallback |
import tvm.testing
def _transform_data(data, bn):
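# NCHW -> NCHW[x]c: split the channel axis into (channel // bn, bn) and move the
# block of bn channels innermost.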
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
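# OIHW -> OIHW[x]o[x]i: block both the output and the input channel axes.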
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn, kh, kw))
kernel = np.transpose(kernel, (0, 2, 4, 5, 1, 3))
return kernel
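# For example (illustrative only): _transform_data on a (2, 8, 5, 5) NCHW array
# with bn=4 yields a (2, 2, 5, 5, 4) NCHW4c array.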
_group_conv2d_nchw_implement = {
"generic": (topi.nn.group_conv2d_nchw, topi.generic.schedule_group_conv2d_nchw),
"gpu": (topi.cuda.group_conv2d_nchw, topi.cuda.schedule_group_conv2d_nchw),
}
_group_conv2d_nhwc_implement = {
"generic": (topi.nn.group_conv2d_nhwc, topi.generic.schedule_group_conv2d_nhwc),
}
def verify_group_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["llvm", "cuda"]:
check_target(target)
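# Channel block sizes for the int8 NCHWc tests below; 4 matches the 4-way int8
# dot product (dp4a) that the CUDA int8 schedule targets.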
oc_block_factor = 4
ic_block_factor = 4
def verify_group_conv2d_NCHWc_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder(
(batch, in_channel // ic_block_factor, in_height, in_width, ic_block_factor),
name="A",
dtype="int8",
)
W = te.placeholder(
(
num_filter // oc_block_factor,
(in_channel // groups) // ic_block_factor,
kernel,
kernel,
oc_block_factor,
ic_block_factor,
),
name="W",
dtype="int8",
)
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.randint(
low=-128, high=127, size=(batch, in_channel, in_height, in_width)
).astype(dtype)
w_np = np.random.randint(
low=-128, high=128, size=(num_filter, in_channel // groups, kernel, kernel)
).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return (
_transform_data(a_np, ic_block_factor),
_transform_kernel(w_np, ic_block_factor, oc_block_factor),
b_np,
c_np,
)
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["cuda"]:
check_target(target)
def verify_group_conv2d_nchw_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="int8")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W", dtype="int8")
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw_int8")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip bec |
ause %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["cuda"]:
check_target(target)
def verify_group_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
W = te.placeholder((kernel, kernel, in_channel // groups, num_filter), name="W")
bias = te.placeholder((1, 1, num_filter), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nhwc_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["llvm"]:
check_target(target)
@tvm.testing.uses_gpu
def test_group_conv2d_nchw():
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 2, 32)
verify_group_conv2d_nchw(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_NCHWc_int8():
with Int8Fallback():
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_NCHWc_int8(
1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True
)
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
verify_group_conv2d_NCHWc_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_nchw_int8():
with Int8Fallback():
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
verify_group_conv2d_nchw_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
def test_group_conv2d_nhwc():
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nhwc(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nhwc(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nhwc(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 2, 32)
verify_group_conv2d_nhwc(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(9, 128, 56, 128, 3, 1, 1, 1, 32)
if __name__ == "__main__":
test_group_conv2d_nchw()
test_group_conv2d_NCHWc_int8()
test_group_conv2d_nchw_int8()
test_group_conv2d_nhwc()
"""Test for NCHW[x]c convolution""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing
from tvm.contrib.pickle_memoize |
import memoize
from tvm.topi.utils |
import get_const_tuple |
import pytest
def _transform_data(data, bn):
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
out_channel, in_channel, kh, kw = kernel.shape
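# OIHW -> blocked OIHW[x]i[x]o4i layout expected by the int8 NCHWc schedule; the
# innermost 4 input channels feed the 4-way int8 dot product.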
kernel = np.reshape(
kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn // 4, 4, kh, kw)
)
kernel = np.transpose(kernel, (0, 2, 5, 6, 3, 1, 4))
return kernel
def verify_group_conv2d_NCHWc_int8(
batch,
in_channel,
groups,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
dtype="int32",
):
assert dilation == 1, "conv2d_NCHWc does not support dilation for now."
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, groups, in_size, num_filter, kernel, stride, padding)
)
in_height = in_width = in_size
oc_block = 1
for bn in range(16, 0, -1):
if num_filter % bn == 0:
oc_block = bn
break
ic_block = 8
autotvm.GLOBAL_SCOPE.silent = True
A = te.placeholder(
(batch, in_channel // ic_block, in_height, in_width, ic_block), name="A", dtype="uint8"
)
W = te.placeholder(
(
num_filter // oc_block,
in_channel // ic_block,
kernel,
kernel,
ic_block // 4,
oc_block,
4,
),
name="W",
dtype="int8",
)
@memoize("topi.tests.test_topi_conv2d_NCHWc_int8.verify_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
w_np = np.random.uniform(size=(num_filter, in_channel // groups, kernel, kernel)).astype(
"int8"
)
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
return (
_transform_data(a_np, ic_block),
_transform_kernel(w_np, ic_block, oc_block),
_transform_data(c_np, oc_block),
)
a_np, w_np, c_np = get_ref_data()
def check_device(target):