rget, name="update_context")

        out = conv2d_nchw(context, kernel, stride, padding, dilate, dtype)
        s = schedule_conv2d_nchw([out])
        conv2d_inc = tvm.build(s, [context, kernel, out], target, name="conv2d_inc")

        out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis)
        s = tvm.topi.testing.get_injective_schedule(target)([out])
        update_output_window = tvm.build(
            s, [inc_output, output_window, out], target, name="update_output_window"
        )

        out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis)
        s = tvm.topi.testing.get_injective_schedule(target)([out])
        update_input_window = tvm.build(
            s, [inc_input, input_window, out], target, name="update_input_window"
        )

        out = conv2d_nchw(input_window, kernel, stride, padding, dilate, dtype)
        s = schedule_conv2d_nchw([out])
        conv2d = tvm.build(s, [input_window, kernel, out], target, name="conv2d")

        input_window_tvm = tvm.nd.array(input_window_np, device=dev)
        new_input_window_tvm = tvm.nd.empty(shape=input_window_shape, device=dev, dtype=dtype)
        kernel_tvm = tvm.nd.array(kernel_np, device=dev)
        context_tvm = tvm.nd.array(context_np, device=dev)
        new_context_tvm = tvm.nd.empty(shape=context_shape, device=dev, dtype=dtype)
        inc_output_tvm = tvm.nd.empty(shape=inc_output_shape, device=dev, dtype=dtype)
        output_window_tvm = tvm.nd.array(output_window_np, device=dev)
        new_output_window_tvm = tvm.nd.empty(shape=output_window_shape, device=dev, dtype=dtype)
        output_window_ref_tvm = tvm.nd.empty(shape=output_window_shape, device=dev, dtype=dtype)

        for i in range(num_iteration):
            inc_input_tvm = tvm.nd.array(inc_input_np[i], device=dev)

            update_context(inc_input_tvm, context_tvm, new_context_tvm)
            conv2d_inc(new_context_tvm, kernel_tvm, inc_output_tvm)
            update_output_window(inc_output_tvm, output_window_tvm, new_output_window_tvm)
            context_tvm = new_context_tvm
            output_window_tvm = new_output_window_tvm

            update_input_window(inc_input_tvm, input_window_tvm, new_input_window_tvm)
            input_window_tvm = new_input_window_tvm
            conv2d(input_window_tvm, kernel_tvm, output_window_ref_tvm)

            tvm.testing.assert_allclose(output_window_tvm.numpy(), output_window_ref_tvm.numpy())

    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)


@tvm.testing.uses_gpu
def test_fifo_buffer():
    for ndim in [1, 2, 3, 4, 5, 6]:
        for axis in range(ndim):
            buffer_shape = tuple(7 for _ in range(ndim))
            data_shape = tuple((2 if i == axis else 7) for i in range(ndim))
            print(
                "Testing FIFO buffer op: buffer_shape = {}, data_shape = {}, axis = {}".format(
                    buffer_shape, data_shape, axis
                )
            )
            verify_fifo_buffer(buffer_shape, data_shape, axis)


@tvm.testing.uses_gpu
def test_conv1d_integration():
    print("Testing FIFO buffer with 1D convolution")
    verify_conv1d_integration()


if __name__ == "__main__":
    test_fifo_buffer()
    test_conv1d_integration()
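# The fifo_buffer op exercised above keeps a sliding window: it drops the oldest
# entries of `buffer` along `axis` and appends `data` at the end. A minimal NumPy
# sketch of that update rule (an illustration of the semantics, not the TOPI kernel):
import numpy as np


def fifo_buffer_ref(data, buffer, axis):
    # Concatenate old buffer and new data, then keep the trailing slice whose
    # length equals the original buffer length along `axis`.
    combined = np.concatenate((buffer, data), axis=axis)
    keep = buffer.shape[axis]
    index = [slice(None)] * buffer.ndim
    index[axis] = slice(combined.shape[axis] - keep, combined.shape[axis])
    return combined[tuple(index)]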
"""Test for argwhere operator"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import topi
import tvm.topi.testing

_argwhere_schedule = {
    "generic": topi.generic.schedule_argwhere,
    "gpu": topi.cuda.schedule_argwhere,
}

_argwhere_compute = {"llvm": topi.argwhere, "cuda": topi.cuda.argwhere}

data_shape = tvm.testing.parameter(
    (1,), (100,),
    (1, 1), (5, 3), (32, 64), (128, 65), (200, 500),
    (6, 5, 3), (1, 1, 1),
    (1, 1, 1, 1), (6, 4, 5, 3),
    (1, 1, 1, 1, 1), (6, 4, 5, 3, 7),
)


@tvm.testing.parametrize_targets("llvm", "cuda")
def test_argwhere(target, dev, data_shape):
    dtype = "int32"
    np_data = np.random.choice([0, 1, 2, 3], size=data_shape).astype(dtype)
    np_out = np.argwhere(np_data)
    out_shape = np_out.shape[0]
    np_shape = np.ones(shape=(out_shape, len(data_shape)), dtype=dtype)

    out_shape = te.placeholder(shape=(out_shape, len(data_shape)), name="out_shape", dtype=dtype)
    condition = te.placeholder(shape=data_shape, name="condition", dtype=dtype)

    with tvm.target.Target(target):
        out = _argwhere_compute[target](out_shape, condition)
        s_func = tvm.topi.testing.dispatch(target, _argwhere_schedule)
        sch = s_func(out)

        func = tvm.build(sch, [out_shape, condition, out], target, name="argwhere")

        args = [tvm.nd.array(np_shape, dev)]
        args.append(tvm.nd.array(np_data, dev))
        args.append(tvm.nd.empty(out.shape, device=dev, dtype=condition.dtype))
        func(*args)

        np.set_printoptions(threshold=np.inf)
        tvm_out = args[-1].numpy()
        tvm.testing.assert_allclose(tvm_out, np_out)


if __name__ == "__main__":
    tvm.testing.main()
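# For reference, np.argwhere returns one row of coordinates per nonzero element,
# so the expected output shape built above is (number of nonzeros, ndim). For example:
#
#     np.argwhere(np.array([[0, 1], [2, 0]]))  ->  [[0, 1], [1, 0]]   # shape (2, 2)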
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
from tvm.topi import utils


def test_util():
    x = tvm.tir.const(100, "int32")
    assert utils.get_const_int(x) == 100
    assert utils.get_const_tuple((x, x)) == (100, 100)


def test_ewise():
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((m, l), name="A")

    def test_apply(func, name):
        B = func(A)
        assert tuple(B.shape) == tuple(A.shape)
        assert B.op.body[0].op.name == "tir." + name

    test_apply(topi.exp, "exp")
    test_apply(topi.erf, "erf")
    test_apply(topi.tanh, "tanh")
    test_apply(topi.sigmoid, "sigmoid")
    test_apply(topi.log, "log")
    test_apply(topi.sqrt, "sqrt")
    test_apply(topi.rsqrt, "rsqrt")
    test_apply(topi.sin, "sin")
    test_apply(topi.cos, "cos")
    test_apply(topi.tan, "tan")
    test_apply(topi.atan, "atan")


if __name__ == "__main__":
    test_util()
    test_ewise()
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
from common import Int8Fallback

_batch_matmul_implement = {
    "generic": (topi.nn.batch_matmul, topi.generic.schedule_batch_matmul),
    "cpu": (topi.x86.batch_matmul, topi.x86.schedule_batch_matmul),
    "gpu": (topi.cuda.batch_matmul, topi.cuda.schedule_batch_matmul),
}


def verify_batch_matmul(x_batch, y_batch, M, N, K, dynamic=False, debug=False):
    if not dynamic:
        x = te.placeholder((x_batch, M, K), name="x")
        y = te.placeholder((y_batch, N, K), name="y")
        dtype = x.dtype
    else:
        assert x_batch == y_batch or x_batch == 1 or y_batch == 1
        batch_size = max(x_batch, y_batch)
        dynamic_batch_size = te.var("dynamic_batch_size")
        dynamic_M = te.var("dynamic_M")
        dynamic_N = te.var("dynamic_N")
        dynamic_K = te.var("dynamic_K")

        x = te.placeholder((dynamic_batch_size, dynamic_M, dynamic_K), name="x")
        y = te.placeholder((dynamic_batch_size, dynamic_N, dynamic_K), name="y")
        dtype = x.dtype

    @memoize("topi.tests.test_topi_batch_matmul")
    def get_ref_data():
        a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype)
        b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype)
        c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
        return (a_np, b_np, c_np)

    a_np, b_np, c_np = get_ref_data()

    def check_device(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _batch_matmul_implement)
            out = fcompute(x, y)
            if not dynamic:
                s = fschedule([out])
                out_shape = out.shape
            else:
                s = te.create_schedule(out.op)
                out_shape = (batch_size, M, N)

            if debug:
                print(tvm.lower(s, [x, y, out], simple_mode=True))

        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(out_shape), dtype=dtype), dev)
        f = tvm.build(s, [x, y, out], target, name="dense")
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    for target, dev in tvm.testing.enabled_targets():
        target_kind = tvm.target.Target(target).kind.name
        if dynamic and target_kind in ["cuda", "nvptx", "vulkan", "opencl"]:
            print("Dynamic batch matmul test is skipped on %s" % target)
            continue
        check_device(target, dev)


def verify_batch_matmul_int8(x_batch, y_batch, M, N, K):
    dtype = "int8"
    out_dtype = "int32"
    assert x_batch == y_batch or x_batch == 1 or y_batch == 1
    x = te.placeholder((x_batch, M, K), name="x", dtype=dtype)
    y = te.placeholder((y_batch, N, K), name="y", dtype=dtype)

    @memoize("topi.tests.test_topi_batch_matmul")
    def get_ref_data():
        a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype)
        b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype)
        c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype=out_dtype)
        return (a_np, b_np, c_np)

    a_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if device == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
            print("Skip because int8 intrinsics are not available")
            return

        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            out = topi.cuda.batch_matmul_int8(x, y, None, out_dtype)
            s = topi.cuda.schedule_batch_matmul_int8([out])
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
        f = tvm.build(s, [x, y, out], device, name="batch_matmul_int8")
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    for device in ["cuda"]:
        check_device(device)


@tvm.testing.uses_gpu
def test_batch_matmul():
    verify_batch_matmul(1, 1, 16, 16, 32)
    verify_batch_matmul(5, 5, 16, 16, 32)
    verify_batch_matmul(5, 5, 16, 20, 32)
    verify_batch_matmul(30, 30, 16, 20, 32)
    verify_batch_matmul(1, 5, 16, 16, 32)
    verify_batch_matmul(5, 1, 16, 16, 32)

    verify_batch_matmul(1, 1, 16, 16, 32, dynamic=True)
    verify_batch_matmul(5, 5, 16, 16, 32, dynamic=True)


@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_batch_matmul_int8():
    with Int8Fallback():
        verify_batch_matmul_int8(1, 1, 2, 3, 1)
        verify_batch_matmul_int8(1, 1, 16, 24, 32)
        verify_batch_matmul_int8(5, 5, 24, 16, 32)
        verify_batch_matmul_int8(30, 30, 16, 20, 32)
        verify_batch_matmul_int8(1, 5, 16, 16, 32)
        verify_batch_matmul_int8(5, 1, 16, 16, 32)


if __name__ == "__main__":
    test_batch_matmul()
    test_batch_matmul_int8()
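# The reference used by the tests above (tvm.topi.testing.batch_matmul) takes the
# second operand as (batch, N, K) and contracts over K, i.e. a batched A @ B^T with
# broadcasting when one batch dimension is 1. A rough NumPy sketch of that
# convention (illustrative only, not the library routine):
import numpy as np


def batch_matmul_ref(a, b, out_dtype=None):
    batch = max(a.shape[0], b.shape[0])
    out = np.zeros((batch, a.shape[1], b.shape[1]), dtype=out_dtype or a.dtype)
    for i in range(batch):
        lhs = a[0 if a.shape[0] == 1 else i].astype(out.dtype)
        rhs = b[0 if b.shape[0] == 1 else i].astype(out.dtype)
        out[i] = np.dot(lhs, rhs.T)
    return out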
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing

_batch_matmul_implement = {
    "gpu": (topi.cuda.batch_matmul_tensorcore, topi.cuda.schedule_batch_matmul_tensorcore),
}


def convert_int32_into_int4(a_int32):
    """convert int32 values into int4

    Parameters
    ----------
    a_int32 : int

    Return
    ------
    a_int4 : int
    """
    B, K, L = a_int32.shape
    assert L % 8 == 0
    a_int4 = np.zeros(shape=(B, K, L // 8), dtype=np.int32)
    for b in range(B):
        for k in range(K):
            for l in range(L // 8):
                for m in range(min(8, L - l * 8)):
                    a_int4[b, k, l] = a_int4[b, k, l] | (
                        (a_int32[b, k, l * 8 + m] & 0xF) << ((7 - m) * 4)
                    )
    return a_int4


def verify_batch_matmul(x_batch, y_batch, M, N, K, dtype):
    x = te.placeholder((x_batch, M, K), name="x", dtype=dtype)
    y = te.placeholder((y_batch, N, K), name="y", dtype=dtype)

    assert dtype in ["int4", "int8", "float16"]

    out_dtype = "float32"
    if dtype in ["int8", "int4"]:
        out_dtype = "int32"

    @memoize("topi.tests.test_topi_batch_matmul_tensorcore")
    def get_ref_data():
        if dtype == "int4":
            a_np = np.random.randint(low=-8, high=7, size=(x_batch, M, K))
            b_np = np.random.randint(low=-8, high=7, size=(y_batch, N, K))
        elif dtype == "int8":
            a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype)
            b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype)
        else:
            a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype)
            b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype)
        c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype)
        return (a_np, b_np, c_np)

    a_np, b_np, c_np = get_ref_data()
    if dtype == "int4":
        a_np = convert_int32_into_int4(a_np)
        b_np = convert_int32_into_int4(b_np)

    def check_device(device):
        dev = tvm.device(device, 0)
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
            out = fcompute(x, y, None, out_dtype)
            s = fschedule([out])
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
        f = tvm.build(s, [x, y, out], device, name="batch_matmul")
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)

    check_device("cuda")


@tvm.testing.requires_tensorcore
def test_batch_matmul():
    for dtype in ["float16", "int8", "int4"]:
        verify_batch_matmul(1, 1, 16, 16, 32, dtype)
        verify_batch_matmul(5, 5, 16, 16, 32, dtype)
        verify_batch_matmul(5, 5, 16, 32, 32, dtype)
        verify_batch_matmul(30, 30, 16, 32, 32, dtype)


if __name__ == "__main__":
    test_batch_matmul()
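# Worked example of the packing performed by convert_int32_into_int4 above: eight
# 4-bit values are packed into one 32-bit word, with value m landing at bit offset
# (7 - m) * 4, so the first value occupies the most significant nibble.
#
#     values = [1, 2, 3, 4, 5, 6, 7, -8]          # -8 & 0xF == 0x8
#     word = 0
#     for m, v in enumerate(values):
#         word |= (v & 0xF) << ((7 - m) * 4)
#     hex(word)  ->  '0x12345678'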
"""Tests for the batch_norm operator."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing

_DEVICE = "llvm"
_BATCH_NORM_IMPLEMENT = {
    "generic": (topi.nn.batch_norm, topi.generic.schedule_batch_norm),
    "cpu": (topi.nn.batch_norm, topi.x86.schedule_batch_norm),
}


@pytest.mark.parametrize(
    "shape, axis, epsilon, center, scale",
    [
        ((1,), 0, 0.1, True, True),
        ((2, 3), 0, 0.1, True, True),
        ((1, 2, 4), 0, 0.1, True, True),
        ((1, 2, 3, 4), 0, 0.001, False, False),
        ((2, 3, 4, 1), 1, 0.01, False, True),
        ((3, 4, 1, 2), 2, 0.1, True, False),
        ((4, 1, 2, 3), 3, 1.0, True, True),
        ((1, 2, 4, 4, 5), 0, 0.1, True, True),
    ],
)
def test_batch_norm(shape, axis, epsilon, center, scale):
    x_np = np.random.random(shape).astype("float32")
    gamma_np = np.random.random(shape[axis]).astype("float32")
    beta_np = np.random.random(shape[axis]).astype("float32")
    moving_mean_np = np.random.random(shape[axis]).astype("float32")
    moving_var_np = np.random.random(shape[axis]).astype("float32")

    out_x_np, out_moving_mean_np, out_moving_var_np = tvm.topi.testing.batch_norm(
        x_np, gamma_np, beta_np, moving_mean_np, moving_var_np, axis, epsilon, center, scale
    )

    x_te = te.placeholder(shape, name="x", dtype="float32")
    gamma_te = te.placeholder((shape[axis],), name="gamma", dtype="float32")
    beta_te = te.placeholder((shape[axis],), name="beta", dtype="float32")
    moving_mean_te = te.placeholder((shape[axis],), name="moving_mean", dtype="float32")
    moving_var_te = te.placeholder((shape[axis],), name="moving_var", dtype="float32")

    with tvm.target.Target(_DEVICE):
        fcompute, fschedule = tvm.topi.testing.dispatch(_DEVICE, _BATCH_NORM_IMPLEMENT)
        out_x, out_moving_mean, out_moving_var = fcompute(
            x_te, gamma_te, beta_te, moving_mean_te, moving_var_te, axis, epsilon, center, scale
        )
        s = fschedule([out_x, out_moving_mean, out_moving_var])

    dev = tvm.device(_DEVICE, 0)

    x_tvm = tvm.nd.array(x_np, dev)
    gamma_tvm = tvm.nd.array(gamma_np, dev)
    beta_tvm = tvm.nd.array(beta_np, dev)
    moving_mean_tvm = tvm.nd.array(moving_mean_np, dev)
    moving_var_tvm = tvm.nd.array(moving_var_np, dev)
    out_x_tvm = tvm.nd.array(np.zeros(shape, dtype=out_x.dtype), dev)
    out_moving_mean_tvm = tvm.nd.array(np.zeros((shape[axis],), dtype=out_moving_mean.dtype), dev)
    out_moving_var_tvm = tvm.nd.array(np.zeros((shape[axis],), dtype=out_moving_var.dtype), dev)

    f = tvm.build(
        s,
        [x_te, gamma_te, beta_te, moving_mean_te, moving_var_te,
         out_x, out_moving_mean, out_moving_var],
        _DEVICE,
    )
    f(
        x_tvm, gamma_tvm, beta_tvm, moving_mean_tvm, moving_var_tvm,
        out_x_tvm, out_moving_mean_tvm, out_moving_var_tvm,
    )

    tvm.testing.assert_allclose(out_x_tvm.numpy(), out_x_np, rtol=1e-3)
    tvm.testing.assert_allclose(out_moving_mean_tvm.numpy(), out_moving_mean_np, rtol=1e-3)
    tvm.testing.assert_allclose(out_moving_var_tvm.numpy(), out_moving_var_np, rtol=1e-3)


if __name__ == "__main__":
    test_batch_norm()
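# For reference, the computation behind the batch_norm test above is the standard
# inference-mode normalization along `axis` (a sketch under that assumption, not the
# TOPI source):
#
#     x_hat = (x - moving_mean) / sqrt(moving_var + epsilon)
#     out   = x_hat * gamma + beta     # gamma applied only if scale, beta only if center
#
# moving_mean and moving_var are returned unchanged as the second and third outputs,
# which is why the test compares them against the inputs' reference values.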
"""Test code for batch to space"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing


def verify_batch_to_space_nd(input_shape, block_shape, crop_begin_list, crop_end_list):
    out_shape = []
    out_shape.append(int((input_shape[0] / np.prod(block_shape))))
    for i in range(1, len(block_shape) + 1):
        crop = crop_begin_list[i - 1] + crop_end_list[i - 1]
        out_shape.append(input_shape[i] * block_shape[i - 1] - crop)
    for i in range(len(block_shape) + 1, len(input_shape)):
        out_shape.append(input_shape[i])

    A = te.placeholder(input_shape, name="A", dtype="float32")
    dtype = A.dtype
    a_np = np.random.uniform(size=input_shape).astype(dtype)

    B = topi.nn.batch_to_space_nd(A, block_shape, crop_begin_list, crop_end_list)
    b_np = tvm.topi.testing.batch_to_space_nd_python(
        a_np, block_shape, crop_begin_list, crop_end_list
    )

    def check_device(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)

    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)


@tvm.testing.uses_gpu
def test_batch_to_space():
    verify_batch_to_space_nd([4, 1, 1, 1], [2, 2], [0, 0], [0, 0])
    verify_batch_to_space_nd([8, 1, 3, 1], [2, 2], [0, 2], [0, 0])
    verify_batch_to_space_nd([18, 2, 1, 2], [2, 3], [1, 1], [0, 0])
    verify_batch_to_space_nd([20, 5, 8, 7], [2, 2], [1, 1], [1, 1])


if __name__ == "__main__":
    test_batch_to_space()
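# Shape arithmetic computed by verify_batch_to_space_nd, worked through on the last
# case in test_batch_to_space above:
#
#     input_shape = [20, 5, 8, 7], block_shape = [2, 2],
#     crop_begin = [1, 1], crop_end = [1, 1]
#
#     dim 0 (batch) : 20 / (2 * 2)     = 5
#     dim 1         : 5 * 2 - (1 + 1)  = 8
#     dim 2         : 8 * 2 - (1 + 1)  = 14
#     dim 3         : 7 (not covered by block_shape, unchanged)
#
#     -> out_shape = [5, 8, 14, 7]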
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize


def generate_quantized_np(shape, bits, out_dtype):
    min_val = 0
    max_val = 1 << bits
    return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)


def verify_bitserial_conv2d_nchw(
    batch, in_size, in_channel, num_filter, kernel, stride, padding,
    activation_bits, weight_bits, unipolar,
):
    in_height = in_width = in_size
    input_dtype = "uint32"
    out_dtype = "int32"

    with tvm.target.Target("llvm"):
        A = te.placeholder((batch, in_channel, in_height, in_width), dtype=input_dtype, name="A")
        W = te.placeholder((num_filter, in_channel, kernel, kernel), dtype=input_dtype, name="W")
        B = topi.x86.bitserial_conv2d_nchw(
            A, W, stride, padding, activation_bits, weight_bits, input_dtype, out_dtype, unipolar
        )
        s = topi.x86.schedule_bitserial_conv2d_nchw([B])

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)

    @memoize("topi.tests.test_topi_bitseral_conv2d_nchw")
    def get_ref_data():
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_dtype)
        if unipolar:
            w_ = np.copy(w_np).astype(out_dtype)
            for x in np.nditer(w_, op_flags=["readwrite"]):
                x[...] = 1 if x == 1 else -1
            b_np = tvm.topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
        else:
            b_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
        return a_np, w_np, b_np

    a_np, w_np, b_np = get_ref_data()

    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], "llvm")
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)


def verify_bitserial_conv2d_nhwc(
    batch, in_size, in_channel, num_filter, kernel, stride, padding,
    activation_bits, weight_bits, unipolar,
):
    in_height = in_width = in_size
    input_dtype = "uint32"
    out_dtype = "int32"

    with tvm.target.Target("llvm"):
        A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_dtype, name="A")
        W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_dtype, name="W")
        B = topi.x86.bitserial_conv2d_nhwc(
            A, W, stride, padding, activation_bits, weight_bits, input_dtype, out_dtype, unipolar
        )
        s = topi.x86.schedule_bitserial_conv2d_nhwc([B])

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)

    @memoize("topi.tests.test_topi_bitseral_conv2d_nhwc")
    def get_ref_data():
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_dtype)
        if unipolar:
            w_ = np.copy(w_np).astype(out_dtype)
            for x in np.nditer(w_, op_flags=["readwrite"]):
                x[...] = 1 if x == 1 else -1
            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
        else:
            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(
                out_dtype
            )
        return a_np, w_np, b_np

    a_np, w_np, b_np = get_ref_data()

    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], "llvm")
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)


def test_bitserial_conv2d():
    in_size = 56
    ic, oc = 64, 64
    k = 3
    stride = 1
    pad = 1

    verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
    verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
    verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
    verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
    verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 2, False)

    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 2, False)


if __name__ == "__main__":
    test_bitserial_conv2d()
import os
import re
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple


def generate_quantized_np(shape, bits, out_dtype):
    np.random.seed(0)
    min_val = 0
    max_val = 1 << bits
    return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)


def verify_bitserial_conv2d_nhwc(
    batch, in_size, in_channel, num_filter, kernel, stride, padding,
    activation_bits, weight_bits, unipolar, use_relu=False,
):
    in_height = in_width = in_size
    input_type = "uint32"
    out_dtype = "int16"
    device = "llvm -device=arm_cpu -model=bcm2837 -mtriple=armv7l-linux-gnueabihf -mattr=+neon"

    with tvm.target.Target(device):
        A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name="A")
        W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name="W")
        B = topi.arm_cpu.bitserial_conv2d_nhwc(
            A, W, stride, padding, activation_bits, weight_bits, "uint8", out_dtype, unipolar
        )
        if use_relu:
            B = topi.nn.relu(B)
        s = topi.arm_cpu.schedule_bitserial_conv2d_nhwc([B])

        func = tvm.build(s, [A, W, B], device)

        assembly = func.get_source("asm")
        matches = re.findall("vpadal", assembly)
        assert len(matches) > 0
        matches = re.findall("vcnt", assembly)
        assert len(matches) > 0
        matches = re.findall("vpadd", assembly)
        assert len(matches) > 0

    dev = tvm.device(device, 0)
    if "arm" not in os.uname()[4]:
        print("Skipped running code, not an arm device")
        return

    print("Running on target: %s" % device)

    def get_ref_data():
        a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
        w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
        if unipolar:
            w_ = np.copy(w_np).astype(out_dtype)
            for x in np.nditer(w_, op_flags=["readwrite"]):
                x[...] = 1 if x == 1 else -1
            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
        else:
            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(
                out_dtype
            )
        return a_np, w_np, b_np

    a_np, w_np, b_np = get_ref_data()
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], device)
    func(a, w, b)
    np.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)


def test_bitserial_conv2d():
    in_size = 56
    ic, oc = 64, 64
    k = 3
    stride = 1
    pad = 1

    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
    verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True, True)


if __name__ == "__main__":
    test_bitserial_conv2d()
"""Test code for bitserial_dense operator"""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize

_bitserial_dense_implement = {
    "generic": (topi.nn.bitserial_dense, topi.generic.schedule_bitserial_dense),
    "cpu": (topi.x86.bitserial_dense, topi.x86.schedule_bitserial_dense),
    "arm_cpu": (topi.arm_cpu.bitserial_dense, topi.arm_cpu.schedule_bitserial_dense),
}


def generate_quantized_np(shape, bits, out_dtype):
    min_val = 0
    max_val = 1 << bits
    return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)


def verify_bitserial_dense(batch, in_dim, out_dim, activation_bits, weight_bits, unipolar):
    out_dtype = "int16"

    def get_ref_data(a_shape, b_shape, input_dtype):
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        b_np = generate_quantized_np(get_const_tuple(b_shape), weight_bits, input_dtype)
        if unipolar:
            b_ = np.copy(b_np).astype(out_dtype)
            for x in np.nditer(b_, op_flags=["readwrite"]):
                x[...] = 1 if x == 1 else -1
            c_np = np.dot(a_np, b_.T)
        else:
            c_np = np.dot(a_np, b_np.T)
        return a_np, b_np, c_np

    for target in ["llvm", "llvm -device=arm_cpu"]:
        if "arm_cpu" in target and "arm" not in os.uname()[4]:
            print("Skipped running code, not an arm device")
            continue
        input_dtype = "uint8" if "arm_cpu" in target else "uint32"
        A = te.placeholder((batch, in_dim), dtype=input_dtype, name="A")
        B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name="B")
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _bitserial_dense_implement)
        C = fcompute(A, B, activation_bits, weight_bits, input_dtype, out_dtype, unipolar)
        s = fschedule([C])

        a_shape = get_const_tuple(A.shape)
        b_shape = get_const_tuple(B.shape)
        a_np, b_np, c_np = get_ref_data(a_shape, b_shape, input_dtype)
        dev = tvm.cpu(0)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        func = tvm.build(s, [A, B, C], target)
        func(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)


def test_bitserial_dense():
    verify_bitserial_dense(1, 1024, 1000, 1, 1, True)
    verify_bitserial_dense(1, 1024, 1000, 2, 1, True)
    verify_bitserial_dense(1, 1024, 1000, 1, 1, False)
    verify_bitserial_dense(1, 1024, 1000, 2, 1, False)


if __name__ == "__main__":
    test_bitserial_dense()
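# Sketch of the bit-serial decomposition the dense test above relies on: a dot
# product of low-bit unsigned integers can be rewritten as weighted popcounts over
# bit planes,
#
#     dot(a, b) = sum_{i,j} 2^(i+j) * popcount(plane_i(a) & plane_j(b)),
#
# which is what lets the kernel use AND + popcount instead of multiplies. An
# illustrative NumPy check of that identity (not the TOPI implementation):
import numpy as np


def bitserial_dot_ref(a, b, a_bits, b_bits):
    total = 0
    for i in range(a_bits):
        for j in range(b_bits):
            a_plane = (a >> i) & 1
            b_plane = (b >> j) & 1
            total += (1 << (i + j)) * int(np.sum(a_plane & b_plane))
    return total  # equals np.dot(a, b) when a fits in a_bits and b fits in b_bits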
"""Test code for binary neural network operators."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize


def verify_binary_dense(batch, in_dim, out_dim):
    A = te.placeholder((batch, in_dim), name="A")
    B = te.placeholder((out_dim, in_dim), name="B")
    bnn_A = topi.nn.binarize_pack(A)
    bnn_B = topi.nn.binarize_pack(B)
    bnn_A1 = te.placeholder(bnn_A.shape, dtype=bnn_A.dtype)
    bnn_B1 = te.placeholder(bnn_B.shape, dtype=bnn_B.dtype)
    bnn_C = topi.nn.binary_dense(bnn_A1, bnn_B1)

    with tvm.target.Target("llvm"):
        s1 = topi.x86.schedule_binarize_pack(bnn_A)
        s2 = topi.x86.schedule_binarize_pack(bnn_B)
        s3 = topi.x86.schedule_binary_dense(bnn_C)

    dtype = A.dtype

    @memoize("topi.tests.test_topi_binary_dense")
    def get_ref_data():
        a_np = (np.random.randint(2, size=(batch, in_dim)) * 2 - 1).astype(dtype)
        b_np = (np.random.randint(2, size=(out_dim, in_dim)) * 2 - 1).astype(dtype)
        c_np = np.dot(a_np, b_np.T)
        return a_np, b_np, c_np

    a_np, b_np, c_np = get_ref_data()

    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    bnn_a = tvm.nd.array(np.zeros(get_const_tuple(bnn_A.shape), dtype=bnn_A.dtype), dev)
    bnn_b = tvm.nd.array(np.zeros(get_const_tuple(bnn_B.shape), dtype=bnn_B.dtype), dev)
    bnn_c = tvm.nd.array(np.zeros(get_const_tuple(bnn_C.shape), dtype=bnn_C.dtype), dev)
    f1 = tvm.build(s1, [A, bnn_A], "llvm")
    f2 = tvm.build(s2, [B, bnn_B], "llvm")
    f3 = tvm.build(s3, [bnn_A1, bnn_B1, bnn_C], "llvm")
    f1(a, bnn_a)
    f2(b, bnn_b)
    f3(bnn_a, bnn_b, bnn_c)
    tvm.testing.assert_allclose(bnn_c.numpy(), c_np, rtol=1e-5)


def test_binary_dense():
    verify_binary_dense(1, 4096, 1024)
    verify_binary_dense(1, 1024, 1000)


if __name__ == "__main__":
    test_binary_dense()
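# Background for the test above: binarize_pack stores each +1/-1 input value as a
# single bit, and a binary dense kernel can then recover the float dot product of
# two length-n sign vectors from popcounts via the standard identity
#
#     dot(a, b) = n - 2 * popcount(pack(a) XOR pack(b))
#
# since matching signs contribute +1 and mismatching signs contribute -1. This is a
# hedged sketch of the usual trick such kernels use; the test itself only checks the
# packed result against np.dot on the original +1/-1 data.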
"""Test code for broadcasting operators."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing


def verify_broadcast_to_ele(in_shape, out_shape, fbcast):
    A = te.placeholder(shape=in_shape, name="A")
    B = fbcast(A, out_shape)

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_broadcast_schedule(target)(B)
        foo = tvm.build(s, [A, B], target, name="broadcast_to")
        data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
        out_npy = np.broadcast_to(data_npy, out_shape)
        data_nd = tvm.nd.array(data_npy, dev)
        out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev)
        foo(data_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target)
    check_target("sdaccel")


def verify_broadcast_binary_ele(
    lhs_shape, rhs_shape, ftopi, fnumpy,
    lhs_min=-100, lhs_max=100, rhs_min=-100, rhs_max=100, dtype="float32",
):
    A = (
        te.var("A", dtype=dtype)
        if lhs_shape is None
        else te.placeholder(shape=lhs_shape, name="A", dtype=dtype)
    )
    B = (
        te.var("B", dtype=dtype)
        if rhs_shape is None
        else te.placeholder(shape=rhs_shape, name="B", dtype=dtype)
    )
    C = ftopi(A, B)
    if isinstance(A, tvm.tir.PrimExpr) and isinstance(B, tvm.tir.PrimExpr):
        assert isinstance(C, tvm.tir.PrimExpr)
        return

    def gen_operand(shape, low, high, dev):
        if shape is None:
            npy = float(np.random.uniform(low=low, high=high))
            if dtype.startswith("int"):
                npy = int(npy)
            nd = npy
        else:
            npy = np.random.uniform(low=low, high=high, size=shape).astype(dtype)
            nd = tvm.nd.array(npy, dev)
        return npy, nd

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_broadcast_schedule(target)(C)
        foo = tvm.build(s, [A, B, C], target, name="broadcast_binary" + "_" + ftopi.__name__)
        lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, dev)
        rhs_npy, rhs_nd = gen_operand(rhs_shape, rhs_min, rhs_max, dev)
        out_npy = fnumpy(lhs_npy, rhs_npy)
        out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
        foo(lhs_nd, rhs_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target)
    check_target("sdaccel")


@tvm.testing.uses_gpu
def test_broadcast_to():
    verify_broadcast_to_ele((1,), (10,), topi.broadcast_to)
    verify_broadcast_to_ele((), (10,), topi.broadcast_to)
    verify_broadcast_to_ele((1, 1, 5, 4), (3, 4, 4, 4, 5, 4), topi.broadcast_to)
    verify_broadcast_to_ele((1, 128, 1, 32), (64, 128, 64, 32), topi.broadcast_to)


@tvm.testing.uses_gpu
def test_add():
    verify_broadcast_binary_ele((), (), topi.add, np.add)
    verify_broadcast_binary_ele((5, 2, 3), (2, 1), topi.add, np.add)


@tvm.testing.uses_gpu
def test_subtract():
    verify_broadcast_binary_ele((5, 2, 3), (), topi.subtract, np.subtract)
    verify_broadcast_binary_ele((5, 2, 3), None, topi.subtract, np.subtract)
    verify_broadcast_binary_ele(None, None, topi.subtract, np.subtract)
    verify_broadcast_binary_ele((1, 32), (64, 32), topi.subtract, np.subtract)


@tvm.testing.uses_gpu
def test_multiply():
    verify_broadcast_binary_ele((5, 64, 128), (2, 5, 64, 1), topi.multiply, np.multiply)


@tvm.testing.uses_gpu
def test_divide():
    verify_broadcast_binary_ele(None, (10,), topi.divide, np.divide, rhs_min=0.0001)
    verify_broadcast_binary_ele((), None, topi.divide, np.divide, rhs_min=0.0001)
    verify_broadcast_binary_ele((2, 3, 1, 32), (64, 32), topi.divide, np.divide, rhs_min=0.0001)


@tvm.testing.uses_gpu
def test_floor_divide():
    def _canonical_floor_div(a, b):
        return np.floor(a / b)

    verify_broadcast_binary_ele(
        None, (10,), topi.floor_divide, _canonical_floor_div, rhs_min=0.0001
    )
    verify_broadcast_binary_ele((), None, topi.floor_divide, _canonical_floor_div, rhs_min=0.0001)
    verify_broadcast_binary_ele(
        (2, 3, 64, 32), (64, 32), topi.floor_divide, _canonical_floor_div, rhs_min=0.0001
    )


@tvm.testing.uses_gpu
def test_maximum_minmum():
    verify_broadcast_binary_ele((32,), (64, 32), topi.maximum, np.maximum)
    verify_broadcast_binary_ele((1, 2, 2, 1, 32), (64, 32), topi.minimum, np.minimum)


@tvm.testing.uses_gpu
def test_power():
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.power, np.power, lhs_min=0.001, rhs_min=0.001, rhs_max=2
    )


@tvm.testing.uses_gpu
def test_mod():
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.mod, np.mod, lhs_min=0.001, rhs_min=1, dtype="int32"
    )


@tvm.testing.uses_gpu
def test_floor_mod():
    def _canonical_floor_mod(a, b):
        return a - np.floor(a / b) * b

    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.floor_mod, _canonical_floor_mod,
        lhs_min=0.001, rhs_min=1, dtype="int32",
    )
    verify_broadcast_binary_ele(
        (3, 4, 5), (3, 4, 5), topi.floor_mod, _canonical_floor_mod,
        lhs_min=0.001, rhs_min=1, dtype="float32",
    )


@tvm.testing.uses_gpu
def test_cmp():
    def greater(x, y):
        return topi.greater(x, y).astype("int8")

    def less(x, y):
        return topi.less(x, y).astype("int8")

    def equal(x, y):
        return topi.equal(x, y).astype("int8")

    def not_equal(x, y):
        return topi.not_equal(x, y).astype("int8")

    def greater_equal(x, y):
        return topi.greater_equal(x, y).astype("int8")

    def less_equal(x, y):
        return topi.less_equal(x, y).astype("int8")

    verify_broadcast_binary_ele((1, 2, 2), (2,), greater, np.greater)
    verify_broadcast_binary_ele((2, 1, 2), (2, 3, 1), less, np.less)
    verify_broadcast_binary_ele(
        (2, 1, 2), (2, 3, 1), equal, np.equal,
        lhs_min=-2, lhs_max=2, rhs_min=-2, rhs_max=2, dtype="int32",
    )
    verify_broadcast_binary_ele(
        (2, 1, 2), (2, 3, 1), not_equal, np.not_equal,
        lhs_min=-2, lhs_max=2, rhs_min=-2, rhs_max=2, dtype="int32",
    )
    verify_broadcast_binary_ele(
        (7, 1, 5), (7, 3, 1), greater_equal, np.greater_equal,
        lhs_min=-3, lhs_max=3, rhs_min=-3, rhs_max=3, dtype="int32",
    )
    verify_broadcast_binary_ele(
        (7, 1, 5), (7, 3, 1), less_equal, np.less_equal,
        lhs_min=-3, lhs_max=3, rhs_min=-3, rhs_max=3, dtype="int32",
    )


@tvm.testing.uses_gpu
def test_shift():
    verify_broadcast_binary_ele(
        (2, 1, 2), None, topi.right_shift, np.right_shift, dtype="int32", rhs_min=0, rhs_max=32
    )
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.left_shift, np.left_shift, dtype="int32", rhs_min=0, rhs_max=32
    )
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.left_shift, np.left_shift, dtype="int32", rhs_min=0, rhs_max=32
    )


@tvm.testing.uses_gpu
def test_logical_single_ele():
    def test_apply(func, name, f_numpy, indata, dtype="bool"):
        A = te.placeholder(shape=indata.shape, name="A", dtype=dtype)
        B = func(A)
        if isinstance(A, tvm.tir.PrimExpr):
            assert isinstance(B, tvm.tir.PrimExpr)
            return

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_broadcast_schedule(target)(B)
            foo = tvm.build(s, [A, B], target, name=name)
            data_npy = indata.astype(A.dtype)
            data_nd = tvm.nd.array(data_npy, dev)
            out_npy = f_numpy(indata)
            out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev)
            foo(data_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), out_npy)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    test_apply(topi.logical_not, "logical_not", np.logical_not, np.array([True, False, 0, 1]))
    test_apply(topi.logical_not, "logical_not", np.logical_not, np.array(np.arange(5) < 3))


@tvm.testing.uses_gpu
def test_bitwise_not():
    def test_apply(func, name, f_numpy, shape, dtype="int32"):
        A = te.placeholder(shape=shape, name="A", dtype=dtype)
        B = func(A)
        if isinstance(A, tvm.tir.PrimExpr):
            assert isinstance(B, tvm.tir.PrimExpr)
            return

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_broadcast_schedule(target)(B)
            foo = tvm.build(s, [A, B], target, name=name)
            data_npy = np.random.uniform(size=shape).astype(A.dtype)
            data_nd = tvm.nd.array(data_npy, dev)
            out_npy = f_numpy(data_npy)
            out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev)
            foo(data_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), out_npy)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    test_apply(topi.bitwise_not, "bitwise_not", np.bitwise_not, ())
    test_apply(topi.bitwise_not, "bitwise_not", np.bitwise_not, (2, 1, 2))


@tvm.testing.uses_gpu
def test_logical_binary_ele():
    def test_apply(func, name, f_numpy, lhs, rhs, dtype="bool"):
        A = te.var("A", dtype=dtype)
        B = te.var("B", dtype=dtype)
        C = func(A, B)
        if isinstance(A, tvm.tir.PrimExpr) and isinstance(B, tvm.tir.PrimExpr):
            assert isinstance(C, tvm.tir.PrimExpr)
            return

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_broadcast_schedule(target)(C)
            foo = tvm.build(s, [A, B, C], target, name=name)
            lhs_nd = tvm.nd.array(lhs, dev)
            rhs_nd = tvm.nd.array(rhs, dev)
            out_npy = f_numpy(lhs, rhs)
            out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
            foo(lhs_nd, rhs_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    test_apply(topi.logical_and, "logical_and", np.logical_and, True, False)
    test_apply(topi.logical_and, "logical_and", np.logical_and, [True, False], [False, False])
    test_apply(topi.logical_or, "logical_or", np.logical_or, True, False)
    test_apply(topi.logical_or, "logical_or", np.logical_or, [True, False], [False, False])
    test_apply(topi.logical_xor, "logical_xor", np.logical_xor, True, False)
    test_apply(topi.logical_xor, "logical_xor", np.logical_xor, [True, False], [False, False])


@tvm.testing.uses_gpu
def test_bitwise_and():
    verify_broadcast_binary_ele(None, None, topi.bitwise_and, np.bitwise_and, dtype="int32")
    verify_broadcast_binary_ele(
        (2, 1, 2), (2, 1, 2), topi.bitwise_and, np.bitwise_and, dtype="int32"
    )


@tvm.testing.uses_gpu
def test_bitwise_or():
    verify_broadcast_binary_ele(None, None, topi.bitwise_or, np.bitwise_or, dtype="int32")
    verify_broadcast_binary_ele((2, 1, 2), (2, 1, 2), topi.bitwise_or, np.bitwise_or, dtype="int32")


@tvm.testing.uses_gpu
def test_bitwise_xor():
    verify_broadcast_binary_ele(None, None, topi.bitwise_xor, np.bitwise_xor, dtype="int32")
    verify_broadcast_binary_ele(
        (2, 1, 2), (2, 1, 2), topi.bitwise_xor, np.bitwise_xor, dtype="int32"
    )


if __name__ == "__main__":
    test_add()
    test_shift()
    test_cmp()
    test_mod()
    test_floor_mod()
    test_subtract()
    test_multiply()
    test_divide()
    test_floor_divide()
    test_maximum_minmum()
    test_power()
    test_broadcast_to()
    test_logical_single_ele()
    test_bitwise_not()
    test_logical_binary_ele()
    test_bitwise_and()
    test_bitwise_or()
    test_bitwise_xor()