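# NOTE: the original top of this test file (license header, imports, the basic
# conv2d tests, and the compare_fq_to_int helper) is missing above. The imports
# below are restored from how they are used in the tests. compare_fq_to_int is
# only a minimal sketch of what the helper is assumed to do, modeled on the
# compare_expected_fq_qat_to_int helper defined later in this file; it is not
# necessarily the upstream implementation.
import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import relay


def compare_fq_to_int(expr, args, allow_rounding_error=False):
    # Assumed behavior: apply FakeQuantizationToInteger, require that the
    # module actually changed, then compare the original and rewritten graphs
    # on the given inputs (exactly, or within one unit if rounding is allowed).
    mod = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
    mod_int = tvm.relay.transform.FakeQuantizationToInteger()(mod)
    assert not tvm.ir.structural_equal(mod, mod_int)
    result = (
        relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm")
        .evaluate()(*args)
        .numpy()
    )
    result_int = (
        relay.create_executor("vm", mod=mod_int, device=tvm.cpu(), target="llvm")
        .evaluate()(*args)
        .numpy()
    )
    if allow_rounding_error:
        assert np.all(np.abs(result.astype("int32") - result_int.astype("int32")) <= 1)
    else:
        assert np.array_equal(result, result_int)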
def test_fake_quantize_conv_per_channel():
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
zero_point = relay.const(0)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), zero_point),
relay.qnn.op.dequantize(
w, relay.const(np.random.random([16]).astype("float32")), zero_point, axis=0
),
kernel_size=[5, 5],
channels=16,
)
op = relay.qnn.op.quantize(op, relay.const(1.0), relay.const(0), out_dtype=out_dtype)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
compare_fq_to_int(op, [x_np, w_np], allow_rounding_error=True)
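# Most tests in this file follow the same pattern: build a "fake quantized"
# graph (qnn.dequantize -> float op -> qnn.quantize) and use compare_fq_to_int
# to check that FakeQuantizationToInteger rewrites it into an integer-only QNN
# graph producing the same results on random int8/uint8 inputs.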
def test_fake_quantize_transposeconv():
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[3, 16, 5, 5], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
op = relay.op.nn.conv2d_transpose(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="IOHW",
)
op = relay.qnn.op.quantize(op, one, zero, out_dtype=out_dtype)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
w_np = np.random.randint(-128, 127, size=[3, 16, 5, 5], dtype="int8")
compare_fq_to_int(op, [x_np, w_np])
def test_fake_quantize_dense():
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, one, zero, out_dtype=out_dtype)
x_np = np.random.randint(-128, 127, size=[128, 64], dtype="int8")
w_np = np.random.randint(-128, 127, size=[256, 64], dtype="int8")
compare_fq_to_int(op, [x_np, w_np])
def test_fake_quantize_dense_per_channel():
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(
w,
relay.const(np.random.random([256]).astype("float32")),
relay.const([0] * 256),
axis=0,
),
units=256,
)
op = relay.qnn.op.quantize(op, one, zero, out_dtype=out_dtype)
x_np = np.random.randint(-128, 127, size=[128, 64], dtype="int8")
w_np = np.random.randint(-128, 127, size=[256, 64], dtype="int8")
compare_fq_to_int(op, [x_np, w_np], allow_rounding_error=True)
def test_fake_quantize_batch_matmul():
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[1, 128, 64], dtype="int8")
w = relay.var("w", shape=[1, 256, 64], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
op = relay.op.nn.batch_matmul(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, one, zero, out_dtype=out_dtype)
x_np = np.random.randint(-128, 127, size=[1, 128, 64], dtype="int8")
w_np = np.random.randint(-128, 127, size=[1, 256, 64], dtype="int8")
compare_fq_to_int(op, [x_np, w_np])
def test_fake_transpose_quantize_conv():
x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
x = relay.transpose(x, [0, 3, 1, 2])
op = relay.op.nn.conv2d(
x, relay.qnn.op.dequantize(w, relay.const(0.5), zero), kernel_size=[5, 5]
)
op = relay.qnn.op.quantize(op, one, zero)
x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
compare_fq_to_int(op, [x_np, w_np])
@pytest.mark.parametrize("const_bias", [False, True])
def test_fake_transpose_quantize_conv_bias_add(const_bias):
x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
if const_bias:
bias = relay.const(np.random.random(16).astype("float32"))
else:
bias = relay.qnn.op.dequantize(relay.var("bias", shape=[16], dtype="int32"), one, zero)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
x = relay.transpose(x, [0, 3, 1, 2])
op = relay.op.nn.conv2d(
x, relay.qnn.op.dequantize(w, relay.const(0.5), zero), kernel_size=[5, 5]
)
op = relay.op.nn.bias_add(op, bias)
op = relay.qnn.op.quantize(op, one, zero)
x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")
args = [x_np, w_np]
if not const_bias:
args.append(bias_np)
compare_fq_to_int(op, args)
def test_fake_transpose_quantize_conv_bias_add_per_channel():
x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
bias = relay.var("bias", shape=[16], dtype="int32")
one = relay.const(1.0)
zero = relay.const(0)
w_scale = (np.random.random([16]).astype("float32") - 0.5) / 10 + 0.5
noise = (np.random.random([16]).astype("float32") - 0.5) * 1e-15
w_zp = relay.const([0] * 16)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
x = relay.transpose(x, [0, 3, 1, 2])
op = relay.op.nn.conv2d(
x, relay.qnn.op.dequantize(w, relay.const(w_scale), w_zp, axis=0), kernel_size=[5, 5]
)
op = relay.op.nn.bias_add(
op, relay.qnn.op.dequantize(bias, relay.const(2.0 * w_scale + noise), w_zp, axis=0)
)
op = relay.qnn.op.quantize(op, one, zero)
x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")
compare_fq_to_int(op, [x_np, w_np, bias_np], allow_rounding_error=True)
def test_fake_transpose_quantize_conv_bias_add_mismatch():
x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
bias = relay.var("bias", shape=[16], dtype="int32")
one = relay.const(1.0)
two = relay.const(2.0)
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
x = relay.transpose(x, [0, 3, 1, 2])
op = relay.op.nn.conv2d(
x, relay.qnn.op.dequantize(w, relay.const(0.5), zero), kernel_size=[5, 5]
)
op = relay.op.nn.bias_add(op, relay.qnn.op.dequantize(bias, two, zero))
op = relay.qnn.op.quantize(op, one, zero)
x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")
compare_fq_to_int(op, [x_np, w_np, bias_np])
def test_fake_quantize_maxpool():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.nn.max_pool2d(x, [3, 3])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
@pytest.mark.parametrize("output_size", [None, 1])
def test_fake_quantize_adaptive_avgpool1d(output_size):
x = relay.var("x", shape=[1, 128, 768], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(-12))
op = relay.op.nn.adaptive_avg_pool1d(x, output_size)
op = relay.qnn.op.quantize(op, relay.const(0.5), relay.const(10))
x_np = np.random.randint(-128, 127, size=[1, 128, 768], dtype="int8")
compare_fq_to_int(op, [x_np], True)
def test_fake_quantize_avgpool():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(-12))
op = relay.op.nn.avg_pool2d(x, [3, 3])
op = relay.qnn.op.quantize(op, relay.const(0.5), relay.const(10))
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np], True)
def test_fake_quantize_global_avg_pool():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(-12))
op = relay.op.nn.global_avg_pool2d(x)
op = relay.qnn.op.quantize(op, relay.const(0.5), relay.const(10))
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np], True)
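# The unary-op helper below exercises elementwise ops (sqrt, rsqrt, exp, erf,
# sigmoid, tanh, log) with random scales and zero points. pos_values pins the
# input zero point to the dtype minimum so the dequantized values are
# non-negative, which sqrt, rsqrt and log require; results may differ by one
# unit, hence the rounding tolerance passed to compare_fq_to_int.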
class TestUnaryQNNOp:
def helper_test_fake_quantize_unary_op(self, fp32_op, pos_values=False):
for dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[1, 3, 3, 3], dtype=dtype)
zero = -128 if dtype == "int8" else 0
if pos_values:
input_mid_point = relay.const(zero)
output_mid_point = relay.const(zero)
else:
input_mid_point = relay.const(np.random.randint(0, 255) + zero)
output_mid_point = relay.const(np.random.randint(0, 255) + zero)
input_scale = relay.const(np.random.rand())
output_scale = relay.const(np.random.rand())
x = relay.qnn.op.dequantize(x, input_scale, input_mid_point)
op = fp32_op(x)
op = relay.qnn.op.quantize(op, output_scale, output_mid_point, out_dtype=dtype)
x_np = np.random.randint(0 + zero, 255 + zero, size=[1, 3, 3, 3], dtype=dtype)
compare_fq_to_int(op, [x_np], True)
def test_sqrt(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.sqrt, pos_values=True)
def test_rsqrt(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.rsqrt, pos_values=True)
def test_exp(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.exp)
def test_erf(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.erf)
def test_sigmoid(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.sigmoid)
def test_tanh(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.tanh)
def test_log(self):
self.helper_test_fake_quantize_unary_op(fp32_op=relay.log, pos_values=True)
def test_fake_quantize_reshape():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.reshape(x, [1, 3, -1])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_image_resize_bilinear():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.image.resize2d(x, size=[4, 4], method="linear")
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np], allow_rounding_error=True)
def test_fake_quantize_abs():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.abs(x)
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_expand_dims():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.expand_dims(x, axis=1)
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_squeeze():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.squeeze(x, axis=[0])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_strided_slice():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.strided_slice(x, begin=[0, 0, 0, 0], end=[1, 1, 112, 112])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_split():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.split(x, axis=3, indices_or_sections=2)
op = relay.qnn.op.quantize(op[0], relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
op = relay.op.split(x, axis=3, indices_or_sections=[56, 112, 168])
op = relay.qnn.op.quantize(op[1], relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_batch_flatten():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.nn.batch_flatten(x)
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_transpose_reshape():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.transpose(x, [1, 0, 2, 3])
op = relay.op.reshape(op, [3, -1])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_concat():
zero = relay.const(0)
inputs = []
for i in range(4):
inputs.append(
relay.qnn.op.dequantize(
relay.var("x%d" % i, shape=[1, 4], dtype="int8"), relay.const(i + 0.5), zero
)
)
concat = relay.op.concatenate(inputs, axis=1)
out = relay.qnn.op.quantize(concat, relay.const(3.5), zero)
inputs_np = []
for i in range(4):
inputs_np.append(np.random.randint(-128, 127, size=[1, 4], dtype="int8"))
compare_fq_to_int(out, inputs_np)
@pytest.mark.parametrize("k", [0, 1, 5])
@pytest.mark.parametrize("axis", [0, -1, 1])
@pytest.mark.parametrize("is_ascend", [True, False])
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
def test_fake_quantize_topk(k, axis, is_ascend, dtype):
x = relay.var("x", shape=[20, 100], dtype=dtype)
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.topk(x, k, axis, "values", is_ascend, "float32")
op = relay.qnn.op.quantize(op, relay.const(2.0), zero, out_dtype=dtype)
x_np = np.random.randint(0, 127, size=[20, 100], dtype=dtype)
compare_fq_to_int(op, [x_np])
def test_fake_quantize_clip():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="uint8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(114))
op = relay.op.clip(x, 0, 6)
op = relay.qnn.op.quantize(op, relay.const(2.0), relay.const(114), out_dtype="uint8")
x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_clip_per_channel():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="uint8")
x = relay.qnn.op.dequantize(
x, relay.const([1.0, 2.0, 3.0]), relay.const([96, 114, 128]), axis=1
)
op = relay.op.clip(x, 0, 6)
op = relay.qnn.op.quantize(
op, relay.const([1.0, 2.0, 3.0]), relay.const([96, 114, 128]), out_dtype="uint8", axis=1
)
x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_relu():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="uint8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(114))
op = relay.op.nn.relu(x)
op = relay.qnn.op.quantize(op, relay.const(2.0), relay.const(114), out_dtype="uint8")
x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_mean():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="uint8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(114))
op = relay.op.mean(x)
op = relay.qnn.op.quantize(op, relay.const(2.0), relay.const(114), out_dtype="uint8")
x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
compare_fq_to_int(op, [x_np], allow_rounding_error=True)
def test_fake_quantize_relu_per_channel():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="uint8")
x = relay.qnn.op.dequantize(
x, relay.const([1.0, 2.0, 3.0]), relay.const([96, 114, 128]), axis=1
)
op = relay.op.nn.relu(x)
op = relay.qnn.op.quantize(
op, relay.const([1.0, 2.0, 3.0]), relay.const([96, 114, 128]), out_dtype="uint8", axis=1
)
x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_leaky_relu():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="uint8")
x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(114))
op = relay.op.nn.leaky_relu(x, 0.1)
op = relay.qnn.op.quantize(op, relay.const(2.0), relay.const(114), out_dtype="uint8")
x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
compare_fq_to_int(op, [x_np], True)
@pytest.mark.parametrize(
"operator",
[relay.op.add, relay.op.multiply, relay.op.subtract, relay.op.minimum, relay.op.maximum],
)
def test_fake_quantize_binary(operator):
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(0.1), relay.const(0))
y = relay.var("y", shape=[1, 3, 224, 224], dtype="int8")
y = relay.qnn.op.dequantize(y, relay.const(0.2), relay.const(0))
op = operator(x, y)
if operator == relay.op.multiply:
out_scale = relay.const(20.0)
else:
out_scale = relay.const(0.1)
op = relay.qnn.op.quantize(op, out_scale, relay.const(0), out_dtype="int8")
x_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
y_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np, y_np])
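# Per-channel variants of the binary ops: the dequantize scales and zero points
# are vectors applied along lhs_axis / rhs_axis. For multiply, the helper
# forces rhs_axis to match lhs_axis and uses a wider output scale, and the
# integer rewrite is allowed to differ by one unit (allow_rounding_error=True).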
@pytest.mark.parametrize(
"operator",
[relay.op.add, relay.op.multiply, relay.op.subtract, relay.op.minimum, relay.op.maximum],
)
def test_fake_quantize_binary_per_channel(operator):
def verify_binary_per_channel(lhs_scale, rhs_scale, lhs_zp, rhs_zp, out_zp, lhs_axis, rhs_axis):
if operator == relay.op.multiply:
out_scale = relay.const(2.0)
rhs_axis = lhs_axis
else:
out_scale = relay.const(0.1)
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(lhs_scale), relay.const(lhs_zp), axis=lhs_axis)
y = relay.var("y", shape=[1, 3, 224, 224], dtype="int8")
y = relay.qnn.op.dequantize(y, relay.const(rhs_scale), relay.const(rhs_zp), axis=rhs_axis)
op = operator(x, y)
op = relay.qnn.op.quantize(op, out_scale, relay.const(out_zp), out_dtype="int8")
x_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
y_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np, y_np], allow_rounding_error=True)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 3),
rhs_scale=np.random.uniform(1.0, 5.0, 3),
lhs_zp=0,
rhs_zp=0,
out_zp=0,
lhs_axis=1,
rhs_axis=1,
)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 3),
rhs_scale=np.random.uniform(1.0, 5.0, 3),
lhs_zp=np.random.randint(1, 3),
rhs_zp=np.random.randint(1, 3),
out_zp=0,
lhs_axis=1,
rhs_axis=1,
)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 3),
rhs_scale=np.random.uniform(1.0, 5.0, 3),
lhs_zp=np.random.randint(1, 3),
rhs_zp=np.random.randint(1, 3),
out_zp=np.random.randint(1, 3),
lhs_axis=1,
rhs_axis=1,
)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 224),
rhs_scale=np.random.uniform(1.0, 5.0, 224),
lhs_zp=np.random.randint(1, 3),
rhs_zp=np.random.randint(1, 3),
out_zp=np.random.randint(1, 3),
lhs_axis=-1,
rhs_axis=-1,
)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 224),
rhs_scale=np.random.uniform(1.0, 5.0, 224),
lhs_zp=0,
rhs_zp=0,
out_zp=0,
lhs_axis=2,
rhs_axis=3,
)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 224),
rhs_scale=np.random.uniform(1.0, 5.0, 224),
lhs_zp=np.random.randint(1, 3),
rhs_zp=np.random.randint(1, 3),
out_zp=0,
lhs_axis=2,
rhs_axis=3,
)
verify_binary_per_channel(
lhs_scale=np.random.uniform(1.0, 5.0, 224),
rhs_scale=np.random.uniform(1.0, 5.0, 224),
lhs_zp=np.random.randint(1, 3),
rhs_zp=np.random.randint(1, 3),
out_zp=np.random.randint(1, 3),
lhs_axis=2,
rhs_axis=3,
)
@pytest.mark.parametrize(
"operator",
[
relay.op.add,
relay.op.multiply,
relay.op.subtract,
relay.op.minimum,
relay.op.maximum,
],
)
def test_fake_quantize_binary_const(operator):
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(0.1), relay.const(10))
y = relay.const(1.0)
op = operator(x, y)
op = relay.qnn.op.quantize(op, relay.const(0.1), relay.const(10), out_dtype="int8")
x_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_subtract_different_output_zp():
for dtype in ["uint8"]:
x = relay.var("x", shape=[1, 128, 128, 3], dtype=dtype)
x = relay.qnn.op.dequantize(x, relay.const(0.1), relay.const(0), axis=1)
y = relay.const(0.5)
op = relay.subtract(x, y)
op = relay.transpose(op, axes=[0, 3, 1, 2])
op = relay.qnn.op.quantize(op, relay.const(0.2), relay.const(128), out_dtype=dtype, axis=1)
x_np = np.random.randint(0, 255, size=[1, 128, 128, 3], dtype=dtype)
compare_fq_to_int(op, [x_np], True)
def test_fake_quantize_pad():
x = relay.var("x", shape=[1, 383, 128], dtype="int8")
x = relay.qnn.op.dequantize(x, relay.const(1.0), relay.const(10))
op = relay.op.nn.pad(x, [[0, 0], [0, 1], [0, 0]], 0.0)
op = relay.qnn.op.quantize(op, relay.const(1.0), relay.const(10), out_dtype="int8")
x_np = np.random.randint(-25, 25, size=[1, 383, 128], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_depth_to_space():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.nn.depth_to_space(x, 4)
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
compare_fq_to_int(op, [x_np])
def test_fake_quantize_max_min():
def run_test_case(partial_func):
x = relay.var("x", shape=[1, 3, 10, 10], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
x = relay.op.nn.depth_to_space(x, 4)
op = partial_func(x)
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
x_np = np.random.randint(-128, 127, size=[1, 3, 10, 10], dtype="int8")
compare_fq_to_int(op, [x_np])
run_test_case(relay.op.max)
run_test_case(relay.op.min)
run_test_case(lambda x: relay.op.max(x, axis=1))
run_test_case(lambda x: relay.op.min(x, axis=1))
def test_fq_avg_pool_conv2d():
dtype = "uint8"
shape_x = [1, 4, 24, 24]
shape_w = [8, 4, 1, 1]
x = relay.var("x", shape=shape_x, dtype=dtype)
w = relay.var("w", shape=shape_w, dtype=dtype)
zero = relay.const(0)
one = relay.const(1.0)
op0 = relay.qnn.op.dequantize(x, relay.const(0.64), relay.const(2))
op1 = relay.op.nn.avg_pool2d(op0, [3, 3])
op2 = relay.qnn.op.dequantize(w, relay.const(0.5), relay.const(10))
op3 = relay.op.nn.conv2d(op1, op2, kernel_size=[1, 1])
expr = relay.qnn.op.quantize(op3, one, zero, out_dtype="uint8")
x_np = np.random.randint(0, 255, size=shape_x, dtype=dtype)
w_np = np.random.randint(0, 255, size=shape_w, dtype=dtype)
compare_fq_to_int(expr, [x_np, w_np])
def test_fq_hard_fail():
@tvm.ir.register_op_attr("nn.conv2d", "FTVMFakeQuantizationToInteger", level=11)
def conv2d(expr, type_map):
raise NotImplementedError
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
kernel_size=[5, 5],
)
op = relay.qnn.op.quantize(op, one, zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
mod = tvm.relay.transform.InferType()(mod)
mod_int = tvm.relay.transform.FakeQuantizationToInteger(hard_fail=False)(mod)
assert tvm.ir.structural_equal(mod_int, mod)
with pytest.raises(Exception):
mod_int = tvm.relay.transform.FakeQuantizationToInteger(hard_fail=True)(mod)
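# The QAT-style tests below run the pass as FakeQuantizationToInteger(False, True),
# i.e. with hard_fail disabled and the optional QAT handling enabled, which also
# rewrites dequantize-rooted regions that are not terminated by a qnn.quantize op.
# compare_expected_fq_qat_to_int checks both that the rewritten module matches an
# explicitly constructed expected expression and that the original, rewritten and
# expected modules evaluate to the same results.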
def compare_expected_fq_qat_to_int(expr, expected_expr, args, allow_rounding_error=False):
mod = tvm.IRModule.from_expr(expr)
mod_def = tvm.relay.transform.InferType()(mod)
mod_int = tvm.relay.transform.FakeQuantizationToInteger(False, True)(mod_def)
mod_exp = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expected_expr))
assert not tvm.ir.structural_equal(mod, mod_int)
assert tvm.ir.structural_equal(mod_int, mod_exp)
result_def = (
relay.create_executor("vm", mod=mod_def, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
result_int = (
relay.create_executor("vm", mod=mod_int, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
result_exp = (
relay.create_executor("vm", mod=mod_exp, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
if allow_rounding_error:
assert np.all(np.abs(result_def.astype("int32") - result_int.astype("int32")) <= 1)
else:
assert np.array_equal(result_def, result_int)
assert np.array_equal(result_int, result_exp)
def test_fq_qat_op_positive_part():
shape_x = [1, 4, 2]
shape_w = [1, 4, 2]
a = relay.var("a", shape=shape_x, dtype="int8")
b = relay.var("b", shape=shape_w, dtype="int8")
op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
op2 = relay.op.nn.batch_matmul(op0, op1)
op3 = relay.op.add(op2, relay.const(1.0))
expr = relay.op.erf(op3)
op0 = relay.qnn.op.qnn.batch_matmul(
a, b, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
)
op1 = relay.qnn.op.qnn.dequantize(op0, relay.const(12.0), relay.const(0))
op2 = relay.op.add(op1, relay.const(1.0))
expected_expr = relay.op.erf(op2)
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_negative_all():
shape_x = [1, 4, 2]
shape_w = [1, 4, 2]
a = relay.var("a", shape=shape_x, dtype="int8")
b = relay.var("b", shape=shape_w, dtype="int8")
op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
op2 = relay.op.add(op1, relay.const(1.0))
op3 = relay.op.nn.batch_matmul(op0, op2)
expr = relay.op.erf(op3)
expected_expr = expr
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_positive_single():
shape_x = [1, 4, 2]
shape_w = [1, 4, 2]
a = relay.var("a", shape=shape_x, dtype="int8")
b = relay.var("b", shape=shape_w, dtype="int8")
op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
expr = relay.op.nn.batch_matmul(op0, op1)
op0 = relay.qnn.op.qnn.batch_matmul(
a, b, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
)
expected_expr = relay.qnn.op.qnn.dequantize(op0, relay.const(12.0), relay.const(0))
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_positive_nothing_to_do():
shape_x = [1, 4, 2]
shape_w = [1, 4, 2]
a = relay.var("a", shape=shape_x, dtype="int8")
b = relay.var("b", shape=shape_w, dtype="int8")
op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
op2 = relay.op.nn.batch_matmul(op0, op1)
op3 = relay.op.add(op2, relay.const(1.0))
expr = relay.qnn.op.quantize(op3, relay.const(1.0), relay.const(0), out_dtype="int8")
op0 = relay.qnn.op.batch_matmul(
a, b, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
)
op1 = relay.qnn.op.quantize(
relay.const(1.0), relay.const(12.0), relay.const(0), out_dtype="int32"
)
op2 = relay.qnn.op.add(
op0,
op1,
relay.const(12.0),
relay.const(0),
relay.const(12.0),
relay.const(0),
relay.const(12.0),
relay.const(0),
)
expected_expr = relay.qnn.op.requantize(
op2, relay.const(12.0), relay.const(0), relay.const(1.0), relay.const(0), out_dtype="int8"
)
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_positive_couple():
shape_x = [1, 2, 4]
shape_w = [2]
a = relay.var("a", shape=shape_x, dtype="int8")
b = relay.var("b", shape=shape_w, dtype="int8")
op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
op2 = relay.op.reshape(op0, (1, 4, 2))
op3 = relay.op.broadcast_to(op1, (2, 2, 2))
op4 = relay.op.nn.batch_matmul(op2, op3)
expr = relay.op.erf(op4)
op0 = relay.op.reshape(a, (1, 4, 2))
op1 = relay.op.broadcast_to(b, (2, 2, 2))
op3 = relay.qnn.op.qnn.batch_matmul(
op0, op1, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
)
op4 = relay.qnn.op.qnn.dequantize(op3, relay.const(12.0), relay.const(0))
expected_expr = relay.op.erf(op4)
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_positive_single_arg_part():
shape_x = [1, 2, 4]
a = relay.var("a", shape=shape_x, dtype="int8")
op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
op1 = relay.op.reshape(op0, (1, 4, 2))
expr = relay.op.erf(op1)
op0 = relay.op.reshape(a, (1, 4, 2))
op1 = relay.qnn.op.dequantize(op0, relay.const(2.0), relay.const(0))
expected_expr = relay.op.erf(op1)
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np])
def test_fq_qat_intermediate_infertype():
shape_x = [1, 2, 4]
x = relay.var("x", shape=shape_x, dtype="float32")
const_0 = relay.const(np.random.uniform(size=[1, 4, 2]).astype("float32"))
op0 = relay.qnn.op.quantize(x, relay.const(17.0), relay.const(0), out_dtype="int8")
op1 = relay.qnn.op.dequantize(op0, relay.const(17.0), relay.const(0))
op2 = relay.op.reshape(op1, (1, 4, 2))
op3 = relay.qnn.op.quantize(op2, relay.const(10.0), relay.const(0), out_dtype="int8")
op4 = relay.qnn.op.quantize(const_0, relay.const(1.0), relay.const(8), out_dtype="int8")
op5 = relay.qnn.op.dequantize(op3, relay.const(10.0), relay.const(0))
op6 = relay.qnn.op.dequantize(op4, relay.const(4.0), relay.const(9))
op7 = relay.op.nn.batch_matmul(op5, op6)
expr = relay.op.add(op7, relay.const(5.0))
op0 = relay.qnn.op.quantize(x, relay.const(17.0), relay.const(0), out_dtype="int8")
op1 = relay.op.reshape(op0, (1, 4, 2))
op2 = relay.qnn.op.requantize(
op1, relay.const(17.0), relay.const(0), relay.const(10.0), relay.const(0), out_dtype="int8"
)
op3 = relay.qnn.op.quantize(const_0, relay.const(1.0), relay.const(8), out_dtype="int8")
op4 = relay.qnn.op.batch_matmul(
op2, op3, relay.const(0), relay.const(9), relay.const(10.0), relay.const(4.0)
)
op5 = relay.qnn.op.dequantize(op4, relay.const(40.0), relay.const(0))
expected_expr = relay.op.add(op5, relay.const(5.0))
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int32").astype("float32")
compare_expected_fq_qat_to_int(expr, expected_expr, [x_np])
if __name__ == "__main__":
tvm.testing.main()
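# Tests for the FastMath pass: exp, tanh, erf and softmax should be rewritten to
# their fast_* approximations, either by applying FastMath() directly or by
# requiring the pass during relay.optimize at opt_level=3.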
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.relay.transform import FastMath
def test_exp():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
y = relay.exp(x)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
fast_mod = FastMath()(mod)
assert "fast_exp" in fast_mod.astext()
with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
fast_mod = relay.optimize(mod, target="llvm", params=None)
assert "fast_exp" in fast_mod[0].astext()
def test_tanh():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
y = relay.tanh(x)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
fast_mod = FastMath()(mod)
assert "fast_tanh" in fast_mod.astext()
with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
fast_mod = relay.optimize(mod, target="llvm", params=None)
assert "fast_tanh" in fast_mod[0].astext()
def test_erf():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
y = relay.erf(x)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
fast_mod = FastMath()(mod)
assert "fast_erf" in fast_mod.astext()
with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
fast_mod = relay.optimize(mod, target="llvm", params=None)
assert "fast_erf" in fast_mod[0].astext()
def test_softmax():
x = relay.var("x", shape=(1, 16), dtype="float32")
y = relay.nn.softmax(x)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
fast_mod = relay.optimize(mod, target="llvm")
assert "nn.fast_softmax" in fast_mod[0].astext()
if __name__ == "__main__":
test_exp()
test_tanh()
test_erf()
test_softmax()
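# Tests for the FlattenAtrousConv pass: a space_to_batch_nd -> conv2d ->
# batch_to_space_nd sequence should be collapsed into a single dilated
# ("atrous") conv2d, while partial or interrupted sequences are left untouched.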
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib import graph_executor
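# compare_expected_fac checks that FlattenAtrousConv changed the module (unless
# the expected expression is the input itself), that the result is structurally
# equal to the expected expression, and that the numerical output is unchanged.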
def compare_expected_fac(expr, expected_expr, args):
mod_def = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
mod_flat = tvm.relay.transform.FlattenAtrousConv()(mod_def)
mod_exp = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expected_expr))
assert expr is expected_expr or not tvm.ir.structural_equal(mod_def, mod_flat)
assert tvm.ir.structural_equal(mod_flat, mod_exp)
result_def = (
relay.create_executor("vm", mod=mod_def, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
result_flat = (
relay.create_executor("vm", mod=mod_flat, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
result_exp = (
relay.create_executor("vm", mod=mod_exp, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
assert np.array_equal(result_def, result_flat)
assert np.array_equal(result_flat, result_exp)
def test_fac_block_shape_2():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op2 = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expected_expr = relay.nn.conv2d(
data,
weight,
padding=[2, 2, 2, 2],
dilation=[2, 2],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_block_shape_4():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[4, 4], paddings=[[4, 7], [4, 7]])
op2 = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op2, block_shape=[4, 4], crops=[[0, 3], [0, 3]])
expected_expr = relay.nn.conv2d(
data,
weight,
padding=[4, 4, 4, 4],
dilation=[4, 4],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_quantize():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="int8")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op2 = relay.qnn.op.conv2d(
op1,
weight,
input_zero_point=relay.const(0),
kernel_zero_point=relay.const(0),
input_scale=relay.const(2.0),
kernel_scale=relay.const(1.0),
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expected_expr = relay.qnn.op.conv2d(
data,
weight,
input_zero_point=relay.const(0),
kernel_zero_point=relay.const(0),
input_scale=relay.const(2.0),
kernel_scale=relay.const(1.0),
padding=[2, 2, 2, 2],
dilation=[2, 2],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_surrounding():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op0 = relay.op.add(data, relay.const(1.0))
op1 = relay.nn.space_to_batch_nd(op0, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op2 = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
op3 = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expr = relay.op.add(op3, relay.const(-1.0))
op0 = relay.op.add(data, relay.const(1.0))
op1 = relay.nn.conv2d(
op0,
weight,
padding=[2, 2, 2, 2],
dilation=[2, 2],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expected_expr = relay.op.add(op1, relay.const(-1.0))
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_several():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op2 = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
op3 = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
op4 = relay.nn.space_to_batch_nd(op3, block_shape=[4, 4], paddings=[[4, 7], [4, 7]])
op5 = relay.nn.conv2d(
op4,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op5, block_shape=[4, 4], crops=[[0, 3], [0, 3]])
op1 = relay.nn.conv2d(
data,
weight,
padding=[2, 2, 2, 2],
dilation=[2, 2],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expected_expr = relay.nn.conv2d(
op1,
weight,
padding=[4, 4, 4, 4],
dilation=[4, 4],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_only_s2b_conv():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
expr = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expected_expr = expr
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_only_s2b():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
expr = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
expected_expr = expr
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_only_conv_b2s():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.conv2d(
data,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op1, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expected_expr = expr
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_only_b2s():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
expr = relay.nn.batch_to_space_nd(data, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expected_expr = expr
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_op_btwn_s2b_conv():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op_1_5 = relay.op.add(op1, relay.const(1.0))
op2 = relay.nn.conv2d(
op_1_5,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expected_expr = expr
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_op_btwn_conv_b2s():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op2 = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
op_2_5 = relay.op.add(op2, relay.const(1.0))
expr = relay.nn.batch_to_space_nd(op_2_5, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
expected_expr = expr
compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_relay_build():
shape_x = [1, 5, 5, 4]
shape_w = [3, 3, 4, 1]
x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
weight = relay.const(w_np)
data = relay.var("data", shape=shape_x, dtype="float32")
op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
op2 = relay.nn.conv2d(
op1,
weight,
padding=[0, 0, 0, 0],
groups=4,
channels=4,
kernel_size=[3, 3],
data_layout="NHWC",
kernel_layout="HWOI",
)
expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
mod_def = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
result_def = (
relay.create_executor("vm", mod=mod_def, device=tvm.cpu(), target="llvm")
.evaluate()(x_np)
.numpy()
)
graph, lib, params = relay.build(mod_def, "llvm", params=None)
rt_mod = graph_executor.create(graph, lib, device=tvm.cpu())
rt_mod.set_input("data", x_np)
rt_mod.set_input(**params)
rt_mod.run()
result_flat = rt_mod.get_output(0).numpy()
assert "space_to_batch_nd" not in graph
assert "conv2d" in graph
assert "batch_to_space_nd" not in graph
assert np.array_equal(result_def, result_flat)
if __name__ == "__main__":
tvm.testing.main()
"""Test flexible shape dispatch pass"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.testing.resnet import get_workload
from tvm.relay import vm
from tvm import runtime
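# FlexibleShapeDispatch wraps the module so the chosen input axis is dispatched
# over the given bucket sizes at runtime; auto_pad additionally handles shapes
# that fall outside the buckets (e.g. batch 3 with buckets [1, 4] below).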
def test_end_to_end():
mod, params = get_workload()
mod = relay.transform.FlexibleShapeDispatch(axis=0, buckets=[1, 4], auto_pad=True)(mod)
exe = relay.vm.compile(mod, "llvm", params=params)
vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
batch_1 = np.random.normal(size=[1, 3, 224, 224]).astype("float32")
assert list(vm.invoke("main", batch_1).shape) == [1, 1000]
batch_4 = np.random.normal(size=[4, 3, 224, 224]).astype("float32")
assert list(vm.invoke("main", batch_4).shape) == [4, 1000]
batch_3 = np.random.normal(size=[3, 3, 224, 224]).astype("float32")
assert list(vm.invoke("main", batch_3).shape) == [3, 1000]
def test_multiple_inputs():
x = relay.var("x", shape=[10, 10], dtype="float32")
w = relay.var("w", shape=[10, 10], dtype="float32")
y = x + w
mod = tvm.IRModule.from_expr(y)
mod = relay.transform.FlexibleShapeDispatch(axis=1, buckets=[5, 10], input_indices=[0, 1])(mod)
exe = relay.vm.compile(mod, "llvm")
vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
x_w_5 = np.random.normal(size=[10, 5]).astype("float32")
assert list(vm.invoke("main", x_w_5, x_w_5).shape) == [10, 5]
x_w_10 = np.random.normal(size=[10, 10]).astype("float32")
assert list(vm.invoke("main", x_w_10, x_w_10).shape) == [10, 10]
def test_fixed_output():
x = relay.var("x", shape=[10, 10], dtype="float32")
w = relay.var("w", shape=[10, 10], dtype="float32")
y = relay.nn.dense(x, w)
mod = tvm.IRModule.from_expr(y)
mod = relay.transform.FlexibleShapeDispatch(
axis=1, buckets=[5, 7], input_indices=[0, 1], affects_output=False
)(mod)
exe = relay.vm.compile(mod, "llvm")
vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
x_w_5 = np.random.normal(size=[10, 5]).astype("float32")
assert list(vm.invoke("main", x_w_5, x_w_5).shape) == [10, 10]
x_w_7 = np.random.normal(size=[10, 7]).astype("float32")
assert list(vm.invoke("main", x_w_7, x_w_7).shape) == [10, 10]
return
def test_multiple_outputs():
x = relay.var("x", shape=[10, 10], dtype="float32")
y = relay.split(x, 2, axis=1)
mod = tvm.IRModule.from_expr(y.astuple())
mod = relay.transform.FlexibleShapeDispatch(axis=0, buckets=[5, 10])(mod)
exe = relay.vm.compile(mod, "llvm")
vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
x_5 = np.random.normal(size=[5, 10]).astype("float32")
result_5 = vm.invoke("main", x_5)
assert list(result_5[0].shape) == [5, 5]
assert list(result_5[1].shape) == [5, 5]
x_10 = np.random.normal(size=[10, 10]).astype("float32")
result_10 = vm.invoke("main", x_10)
assert list(result_10[0].shape) == [10, 5]
assert list(result_10[1].shape) == [10, 5]
if __name__ == "__main__":
pytest.main([__file__])
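# Tests for the FoldConstant pass: constant subexpressions (including tuples,
# lets, if nodes, shape_of/ndarray_size and, with fold_qnn=True, QNN ops) are
# evaluated at compile time, while on_device annotations and dropout must not
# change the outcome.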
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.backend import Executor
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing import run_infer_type, create_workload
def annot_expr(e):
"""Returns e wrapped with an on_device annotation."""
return relay.op.annotation.on_device(e, tvm.cpu(), constrain_result=True)
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_concatenate_const():
def before():
data = tvm.nd.array(np.array([1.0, 2.0, 3.0]))
const = relay.const(data)
concat = relay.op.concatenate([const, const], axis=0)
func = relay.Function([], concat)
return func
def expected():
data = tvm.nd.array(np.array([1.0, 2.0, 3.0, 1.0, 2.0, 3.0]))
const = relay.const(data)
func = relay.Function([], const)
return func
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(zz, zexpected)
def test_fold_const():
c_data = np.array([1, 2, 3]).astype("float32")
t = relay.TensorType([1, 2, 3], "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", t)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(x, y)
z = relay.add(y, c)
return relay.Function([x], z)
def expected():
x = relay.var("x", t)
c_folded = (c_data + c_data) * 2
y = relay.add(x, relay.const(c_folded))
z = relay.add(y, relay.const(c_data))
return relay.Function([x], z)
with tvm.target.Target("cuda"):
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_const_with_on_device():
"""Make sure on_device annotations don't get in the way of constant folding"""
c_data = np.array([1, 2, 3]).astype("float32")
t = relay.TensorType([1, 2, 3], "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", t)
x.virtual_device_ = tvm.cpu()
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(x, y)
z = relay.add(y, c)
f = relay.Function([x], z)
f.virtual_device_ = tvm.cpu()
return f
def expected():
x = relay.var("x", t)
x.virtual_device_ = tvm.cpu()
c_folded = (c_data + c_data) * 2
y = relay.add(x, relay.const(c_folded))
z = relay.add(y, relay.const(c_data))
f = relay.Function([x], z)
f.virtual_device_ = tvm.cpu()
return f
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_let():
c_data = np.array(1).astype("float32")
t = relay.TensorType([1], "float32")
def before():
sb = relay.ScopeBuilder()
x = relay.var("x", t)
t1 = sb.let("t1", relay.const(c_data))
t2 = sb.let("t2", relay.add(t1, t1))
t3 = sb.let("t3", relay.add(t2, x))
sb.ret(t3)
return relay.Function([x], sb.get())
def expected():
sb = relay.ScopeBuilder()
x = relay.var("x", t)
c_folded = c_data + c_data
t3 = sb.let("t3", relay.add(relay.const(c_folded), x))
sb.ret(t3)
return relay.Function([x], sb.get())
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_let_with_on_device():
"""Make sure on_device annotations don't get in the way of constant folding,
and inlined constants bring their annotations with them."""
c_data = np.array(1).astype("float32")
t = relay.TensorType([1], "float32")
def before():
sb = relay.ScopeBuilder()
x = relay.var("x", t)
x.virtual_device_ = tvm.cpu()
t1 = sb.let("t1", annot_expr(relay.const(c_data)))
t2 = sb.let("t2", annot_expr(relay.add(t1, t1)))
t3 = sb.let("t3", annot_expr(relay.add(t2, x)))
sb.ret(t3)
f = relay.Function([x], sb.get())
f.virtual_device_ = tvm.cpu()
return f
def expected():
sb = relay.ScopeBuilder()
x = relay.var("x", t)
x.virtual_device_ = tvm.cpu()
c_folded = c_data + c_data
t3 = sb.let("t3", annot_expr(relay.add(annot_expr(relay.const(c_folded)), x)))
sb.ret(t3)
f = relay.Function([x], sb.get())
f.virtual_device_ = tvm.cpu()
return f
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_tuple():
c_data = np.array(1).astype("float32")
t = relay.TensorType([1], "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", t)
y = relay.Tuple([x, c])
z = relay.add(y[1], c)
z = relay.add(z, y[0])
return relay.Function([x], z)
def expected():
c = relay.const(c_data + c_data)
x = relay.var("x", t)
z = relay.add(c, x)
return relay.Function([x], z)
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_concat():
c_data = np.array([[1, 2, 3]]).astype("float32")
def before():
a = relay.const(c_data)
b = relay.const(c_data)
y = relay.concatenate((a, b), axis=0)
return relay.Function([], y)
def expected():
y_data = np.concatenate((c_data, c_data), axis=0)
y = relay.const(y_data)
return relay.Function([], y)
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_if():
cond_data = np.array(1).astype("bool")
x_data = np.array([[1, 2, 3]]).astype("float32")
def before():
a = relay.const(cond_data)
x = relay.const(x_data)
y = relay.const(x_data)
iff = relay.If(a, x + y, x - y)
return relay.Function([], iff)
def expected():
y_data = x_data + x_data
y = relay.const(y_data)
return relay.Function([], y)
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
cond_data = np.array(0).astype("bool")
def before():
a = relay.const(cond_data)
x = relay.const(x_data)
y = relay.const(x_data)
iff = relay.If(a, x + y, x - y)
return relay.Function([], iff)
def expected():
y_data = x_data - x_data
y = relay.const(y_data)
return relay.Function([], y)
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_shape_of():
c_shape = (8, 9, 10)
def before(dtype):
x = relay.var("x", shape=c_shape, dtype="float32")
y = relay.var("y", shape=c_shape, dtype="float32")
z = relay.shape_of(x + y, dtype)
return relay.Function([x, y], z)
def expected(dtype):
x = relay.var("x", shape=c_shape, dtype="float32")
y = relay.var("y", shape=c_shape, dtype="float32")
z = relay.const(np.array(c_shape).astype(dtype), dtype=dtype)
func = relay.Function([x, y], z)
return func
for dtype in ["int32", "float32"]:
zz = run_opt_pass(before(dtype), transform.FoldConstant())
zexpected = run_opt_pass(expected(dtype), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_ndarray_size():
c_shape = (8, 9, 10)
def before(dtype):
x = relay.var("x", shape=c_shape, dtype="float32")
y = relay.var("y", shape=c_shape, dtype="float32")
z = relay.ndarray_size(x + y, dtype)
return relay.Function([x, y], z)
def expected(dtype):
x = relay.var("x", shape=c_shape, dtype="float32")
y = relay.var("y", shape=c_shape, dtype="float32")
z = relay.const(np.size(np.zeros(c_shape)), dtype=dtype)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
return mod["main"]
for dtype in ["int32", "float32"]:
zz = run_opt_pass(before(dtype), transform.FoldConstant())
zexpected = run_opt_pass(expected(dtype), transform.InferType())
tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_batch_norm():
def expected():
data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
weight = relay.const(np.zeros((16, 3, 3, 3)))
bias = relay.const(np.zeros((16, 1, 1)))
conv = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
add = relay.add(conv, bias)
return relay.Function(relay.analysis.free_vars(add), add)
remove_bn_pass = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.SimplifyInference(),
relay.transform.FoldConstant(),
relay.transform.FoldScaleAxis(),
]
)
data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
conv = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)
def initializer(_, param):
param = np.zeros(param.shape)
mod, params = create_workload(bn_output[0], initializer)
mod["main"] = bind_params_by_name(mod["main"], params)
with tvm.transform.PassContext(opt_level=3):
mod = remove_bn_pass(mod)
expect = run_infer_type(expected())
tvm.ir.assert_structural_equal(mod["main"], expect)
def test_fold_dropout():
def before():
data = relay.const(np.arange(10).astype(np.float32))
dropout = relay.nn.dropout(data)
add = dropout + relay.const(1.0)
return relay.Function(relay.analysis.free_vars(add), add)
passes = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
]
)
before_mod = tvm.IRModule.from_expr(before())
with tvm.transform.PassContext(opt_level=3):
after_mod = passes(before_mod)
tvm.ir.assert_structural_equal(run_infer_type(before_mod["main"]), after_mod["main"])
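# By default FoldConstant leaves QNN ops untouched (the graph must come back
# unchanged); only with fold_qnn=True are constant qnn.quantize / qnn.add /
# qnn.conv2d / qnn.mul subgraphs folded, which the remaining tests verify.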
def test_fold_qnn_const():
def before():
add = relay.qnn.op.add(
relay.const(np.ones((2, 3), dtype="uint8"), dtype="uint8"),
relay.const(np.ones((2, 3), dtype="uint8"), dtype="uint8"),
lhs_scale=relay.const(2.0),
lhs_zero_point=relay.const(0),
rhs_scale=relay.const(2.0),
rhs_zero_point=relay.const(0),
output_scale=relay.const(1.0),
output_zero_point=relay.const(0),
)
a = relay.var("a", shape=[2, 3], dtype="float32")
dense = relay.qnn.op.dense(
relay.qnn.op.quantize(a, relay.const(1.0), relay.const(0)),
add,
input_zero_point=relay.const(0),
kernel_zero_point=relay.const(0),
input_scale=relay.const(2.0),
kernel_scale=relay.const(2.0),
units=None,
)
b = relay.var("b", shape=[2], dtype="float32")
bias = relay.qnn.op.add(
dense,
relay.qnn.op.quantize(b, relay.const(1.0), relay.const(0), out_dtype="int32"), |
lhs_scale=relay.const(2.0),
lhs_zero_point=relay.const(0),
rhs_scale=relay.const(2.0),
rhs_zero_point=relay.const(0),
output_scale=relay.const(1.0),
output_zero_point=relay.const(0),
)
return relay.Function([a, b], bias)
def expected():
a = relay.var("a", shape=[2, 3], dtype="float32")
dense = relay.qnn.op.dense(
relay.qnn.op.quantize(a, relay.const(1.0), relay.const(0)),
relay.const(np.array([[4, 4, 4], [4, 4, 4]], dtype="uint8"), dtype="uint8"),
input_zero_point=relay.const(0),
kernel_zero_point=relay.const(0),
input_scale=relay.const(2.0),
kernel_scale=relay.const(2.0),
units=None,
)
b = relay.var("b", shape=[2], dtype="float32")
bias = relay.qnn.op.add(
dense,
relay.qnn.op.quantize(b, relay.const(1.0), relay.const(0), out_dtype="int32"),
lhs_scale=relay.const(2.0),
lhs_zero_point=relay.const(0),
rhs_scale=relay.const(2.0),
rhs_zero_point=relay.const(0),
output_scale=relay.const(1.0),
output_zero_point=relay.const(0),
)
return relay.Function([a, b], bias)
a = run_opt_pass(before(), transform.FoldConstant())
b = run_opt_pass(before(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
b = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
def test_fold_quantize():
t = relay.TensorType([1, 2, 3], "int8")
def before():
data = tvm.nd.array(np.array([1.0, 2.0, 3.0], dtype="float32"))
const_fp = relay.const(data, dtype="float32")
const_i8 = relay.qnn.op.quantize(
const_fp, output_scale=relay.const(0.5), output_zero_point=relay.const(0)
)
x = relay.var("x", t)
sub = relay.op.subtract(x, const_i8)
func = relay.Function([x], sub)
return func
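# expected() below encodes quantize(x) = round(x / scale) + zero_point applied to the
# constant: [1.0, 2.0, 3.0] at scale 0.5 and zero point 0 becomes the int8 [2, 4, 6].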
def expected():
data = tvm.nd.array(np.array([2, 4, 6], dtype="int8"))
const_i8 = relay.const(data, dtype="int8")
x = relay.var("x", t)
sub = relay.op.subtract(x, const_i8)
func = relay.Function([x], sub)
return func
a = run_opt_pass(before(), transform.FoldConstant())
b = run_opt_pass(before(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
b = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
def test_fold_qnn_conv2d_qnn_mul():
def before():
dtype = "uint8"
op0 = relay.qnn.op.conv2d(
relay.const(np.ones((1, 1, 2, 2), dtype=dtype), dtype=dtype),
relay.const(np.ones((1, 1, 2, 2), dtype=dtype), dtype=dtype),
input_zero_point=relay.const(0, "int32"),
kernel_zero_point=relay.const(0, "int32"),
input_scale=relay.const(1.0, "float32"),
kernel_scale=relay.const(1.0, "float32"),
kernel_size=(2, 2),
channels=1,
)
op = relay.qnn.op.mul(
op0,
relay.const(np.array([10], dtype="int32"), dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
)
func = relay.Function([], op)
return func
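# The expected constant follows from the arithmetic above: an all-ones 1x1x2x2 input
# convolved with an all-ones 2x2 kernel (unit scales, zero points 0) produces 4, and
# qnn.mul by the constant 10 at unit scales then yields 40.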
def expected():
data = relay.const(np.array([[[[40]]]], dtype="int32"), dtype="int32")
func = relay.Function([], data)
return func
a = run_opt_pass(before(), transform.FoldConstant())
b = run_opt_pass(before(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
b = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
def test_fold_requantize():
def before():
data = tvm.nd.array(np.array([1, 2, 3], dtype="int8"))
const_i8 = relay.const(data, dtype="int8")
op = relay.qnn.op.requantize(
const_i8,
input_scale=relay.const(2.0, dtype="float32"),
input_zero_point=relay.const(1, dtype="int32"),
output_scale=relay.const(1.0, dtype="float32"),
output_zero_point=relay.const(1, dtype="int32"),
)
x = relay.var("x", relay.TensorType([3], "int8"))
add = relay.op.add(op, x)
func = relay.Function([x], add)
return func
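# requantize maps q to roughly round((q - input_zp) * input_scale / output_scale) +
# output_zp, so [1, 2, 3] with input scale 2, input zero point 1, output scale 1 and
# output zero point 1 becomes the [1, 3, 5] constant used in expected().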
def expected():
data = tvm.nd.array(np.array([1, 3, 5], dtype="int8"))
const_i8 = relay.const(data, dtype="int8")
x = relay.var("x", relay.TensorType([3], "int8"))
add = relay.op.add(const_i8, x)
func = relay.Function([x], add)
return func
a = run_opt_pass(before(), transform.FoldConstant())
b = run_opt_pass(before(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
b = run_opt_pass(expected(), transform.InferType())
tvm.ir.assert_structural_equal(a, b)
def test_pass_link_params():
"""
This test checks ensures that proper executor is passed to interpreter instance
The test will fail if FoldConstant does not override the executor due to "int8"
is not supported in ScheduleBuilder
"""
def expr():
z = relay.const(10, dtype="int8")
return relay.cast(z, dtype="int32")
mod = tvm.IRModule.from_expr(expr())
mod = tvm.relay.transform.InferType()(mod)
mod = mod.with_attr("executor", Executor("aot", {"link-params": True}))
mod = tvm.relay.transform.FoldConstant()(mod)
if __name__ == "__main__":
tvm.testing.main() |
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
import numpy as np
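# FoldExplicitPadding, as exercised below, merges a standalone nn.pad into the padding
# attribute of the following convolution only when the pad is "constant" with value 0
# on the spatial dimensions; the new conv padding is the explicit pad width added to
# the original padding. Non-zero pad values or other pad modes (the no_fold cases)
# must leave the graph unchanged.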
def test_simplify_conv_pad():
convs = [relay.nn.conv1d, relay.nn.conv2d, relay.nn.conv3d]
def validate(ndim, pad_width, pad_value, pad_mode, orig_padding, layout, no_fold=False):
if layout[1] == "C":
shape = [1, 3] + [10] * ndim
wshape = [8, 3] + [3] * ndim
elif layout[-1] == "C":
shape = [1] + [10] * ndim + [3]
wshape = [8] + [3] * ndim + [3]
else:
raise ValueError("This test only supports NC* and N*C")
x = relay.var("x", shape=shape, dtype="float32")
w = relay.var("w", shape=wshape, dtype="float32")
pad = relay.nn.pad(x, pad_width, pad_value, pad_mode)
if layout[1] == "C":
conv = convs[ndim - 1](pad, w, padding=orig_padding)
else:
conv = convs[ndim - 1](
pad, w, padding=orig_padding, data_layout=layout, kernel_layout="DHWIO"[3 - ndim :]
)
if pad_mode == "constant" and pad_value == 0:
new_padding = []
for j in range(2):
for i in range(len(pad_width)):
if layout[i] in ["D", "H", "W"]:
new_padding.append(pad_width[i][j])
for i in range(len(new_padding)):
new_padding[i] += orig_padding[i]
if layout[1] == "C":
after = convs[ndim - 1](x, w, padding=new_padding)
else:
after = convs[ndim - 1](
x, w, padding=new_padding, data_layout=layout, kernel_layout="DHWIO"[3 - ndim :]
)
else:
after = conv
zz = run_opt_pass(conv, transform.FoldExplicitPadding())
expected = run_opt_pass(after, transform.InferType())
assert tvm.ir.structural_equal(zz, expected)
mod1 = tvm.IRModule.from_expr(conv)
mod2 = tvm.IRModule.from_expr(zz)
if not no_fold:
op_freqs = relay.analysis.list_op_freqs(mod2)
assert "nn.pad" not in op_freqs |
with tvm.transform.PassContext():
func1 = relay.create_executor(
"vm", mod=mod1, device=tvm.cpu(), target="llvm"
).evaluate()
func2 = relay.create_executor("vm", mod=mod2, device=tvm.cpu(), target="llvm").evaluate()
x_np = np.random.rand(*shape).astype("float32")
w_np = np.random.rand(*wshape).astype("float32")
result1 = func1(x_np, w_np)
result2 = func2(x_np, w_np)
tvm.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5)
for orig_pad in [[0, 0], [2, 0], [0, 2]]:
for i_pad in [[0, 0], [1, 1], [1, 0]]:
for ndim in [1, 2, 3]:
for channels_last in [0, 1]:
if channels_last:
layout = "NDHWC"
layout = layout[0:1] + layout[4 - ndim : 4] + layout[-1:]
padding = [[0, 0]] + [i_pad] * ndim + [[0, 0]]
else:
layout = "NCDHW"
layout = layout[0:2] + layout[5 - ndim :]
padding = [[0, 0]] * 2 + [i_pad] * ndim
validate(ndim, padding, 0, "constant", orig_pad * ndim, layout)
ndim = 2
validate(
ndim, [[0, 0]] * 2 + [i_pad] * ndim, 1, "constant", orig_pad * ndim, "NCHW", no_fold=True
)
validate(ndim, [[0, 0]] * 2 + [i_pad] * ndim, 0, "edge", orig_pad * ndim, "NCHW", no_fold=True)
def get_min_value(dtype):
if np.issubdtype(dtype, np.floating):
return np.finfo(dtype).min
elif np.issubdtype(dtype, np.integer):
return np.iinfo(dtype).min
else:
raise ValueError("Cannot get min value for dtypes that are not integer or not floating")
def test_simplify_pool_pad():
max_pools = [relay.nn.max_pool1d, relay.nn.max_pool2d, relay.nn.max_pool3d]
avg_pools = [relay.nn.avg_pool1d, relay.nn.avg_pool2d, relay.nn.avg_pool3d]
def validate(
pools,
ndim,
pad_width,
pad_value,
orig_padding,
layout,
pool_size,
pad_mode="constant",
dtype="float32",
no_fold=False,
**kwargs,
):
pad_value_const = relay.const(pad_value, dtype=dtype)
if layout[1] == "C":
shape = [1, 3] + [10] * ndim
elif layout[-1] == "C":
shape = [1] + [10] * ndim + [3]
else:
raise ValueError("This test only supports NC* and N*C")
x = relay.var("x", shape=shape, dtype=dtype)
pad = relay.nn.pad(x, pad_width, pad_value_const, pad_mode)
if layout[1] == "C":
pool = pools[ndim - 1](pad, padding=orig_padding, pool_size=pool_size, **kwargs)
else:
pool = pools[ndim - 1](
pad, padding=orig_padding, layout=layout, pool_size=pool_size, **kwargs
)
if pools == max_pools:
foldable_pad_value = get_min_value(dtype)
else:
foldable_pad_value = 0
if pad_mode == "constant" and pad_value == foldable_pad_value:
new_padding = []
for j in range(2):
for i in range(len(pad_width)):
if layout[i] in ["D", "H", "W"]:
new_padding.append(pad_width[i][j])
for i in range(len(new_padding)):
new_padding[i] += orig_padding[i]
if pools == avg_pools and all(v == 0 for v in orig_padding):
kwargs["count_include_pad"] = True
if layout[1] == "C":
after = pools[ndim - 1](x, padding=new_padding, pool_size=pool_size, **kwargs)
else:
after = pools[ndim - 1](
x, padding=new_padding, layout=layout, pool_size=pool_size, **kwargs
)
else:
after = pool
zz = run_opt_pass(pool, transform.FoldExplicitPadding())
expected = run_opt_pass(after, transform.InferType())
assert tvm.ir.structural_equal(zz, expected)
mod1 = tvm.IRModule.from_expr(pool)
mod2 = tvm.IRModule.from_expr(zz)
if not no_fold:
op_freqs = relay.analysis.list_op_freqs(mod2)
assert "nn.pad" not in op_freqs
with tvm.transform.PassContext():
func1 = relay.create_executor(
"vm", mod=mod1, device=tvm.cpu(), target="llvm"
).evaluate()
func2 = relay.create_executor("vm", mod=mod2, device=tvm.cpu(), target="llvm").evaluate()
x_np = np.random.rand(*shape).astype(dtype)
result1 = func1(x_np)
result2 = func2(x_np)
tvm.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5)
float_min_val = get_min_value("float32")
for orig_pad in [[0, 0], [2, 0]]:
for i_pad in [[1, 1], [1, 0]]:
for ndim in [1, 2, 3]:
for channels_last in [0, 1]:
if channels_last:
layout = "NDHWC"
layout = layout[0:1] + layout[4 - ndim : 4] + layout[-1:]
padding = [[0, 0]] + [i_pad] * ndim + [[0, 0]]
else:
layout = "NCDHW"
layout = layout[0:2] + layout[5 - ndim :]
padding = [[0, 0]] * 2 + [i_pad] * ndim
validate(max_pools, ndim, padding, float_min_val, orig_pad * ndim, layout, 2)
validate(max_pools, 1, [[0, 0], [0, 0], [0, 0]], float_min_val, [2, 0], "NCW", 2)
int_min_val = get_min_value("uint8")
validate(
max_pools,
2,
[[0, 0], [0, 0], [0, 2], [2, 0]],
int_min_val,
[2, 0, 0, 0],
"NCHW",
2,
dtype="uint8",
)
validate(
avg_pools,
2,
[[0, 0], [0, 0], [0, 2], [2, 0]],
0,
[0, 0, 1, 0],
"NCHW",
2,
count_include_pad=True,
)
validate(avg_pools, 2, [[0, 0], [0, 0], [0, 2], [2, 0]], 0, [0, 0, 0, 0], "NCHW", 2)
validate(
avg_pools, 2, [[0, 0], [0, 0], [0, 2], [2, 0]], 0, [0, 0, 0, 0], "NCHW", 2, no_fold=True
)
validate(max_pools, 1, [[0, 0], [0, 0], [0, 2]], 0, [0, 0], "NCHW", 2, no_fold=True)
validate(avg_pools, 1, [[0, 0], [0, 0], [0, 2]], 1, [0, 0], "NCHW", 2, no_fold=True)
validate(
avg_pools, 1, [[0, 0], [0, 0], [0, 2]], 0, [0, 0], "NCHW", 2, pad_mode="edge", no_fold=True
)
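# For qnn.conv2d, padding with the input zero point is equivalent to the operator's
# implicit padding, so nn.pad folds into the conv only when pad_value equals
# input_zero_point (10 below); the no-fold test pads with 1 against a zero point of 0.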
def test_fold_pad_qconv2d():
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
input_zero_point = 10
pad = relay.nn.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], pad_value=input_zero_point)
return relay.qnn.op.conv2d(
pad,
weight,
relay.const(input_zero_point, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(0, 0),
data_layout="NHWC",
kernel_layout="HWIO",
)
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
input_zero_point = 10
return relay.qnn.op.conv2d(
x,
weight,
relay.const(input_zero_point, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
a = run_opt_pass(before(), relay.transform.FoldExplicitPadding())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b, map_free_vars=True), "Actual = \n" + str(a)
def test_pad_qconv2d_no_fold():
def get_expr():
x = relay.var("x", shape=(1, |
1, 2, 2), dtype="int8")
weight = relay.var("weight", shape=(1, 1, 2, 2), dtype="int8")
pad_value = 1
input_zero_point = 0
pad = relay.nn.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], pad_value=pad_value)
return relay.qnn.op.conv2d(
pad,
weight,
relay.const(input_zero_point, "int32"),
relay.const(0, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=1,
kernel_size=(2, 2),
padding=(0, 0),
)
a = run_opt_pass(get_expr(), relay.transform.FoldExplicitPadding())
b = run_opt_pass(get_expr(), transform.InferType())
assert tvm.ir.structural_equal(a, b, map_free_vars=True), (
"\nActual = \n" + str(a) + "\nExpected = \n" + str(b)
)
if __name__ == "__main__":
test_simplify_conv_pad()
test_simplify_pool_pad()
test_fold_pad_qconv2d()
test_pad_qconv2d_no_fold() |
import numpy as np |
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import create_workload
from tvm.relay.build_module import bind_params_by_name
def initializer(_, param):
param = np.zeros(param.shape)
def _get_positive_scale(size):
return np.random.uniform(0.5, 1, size=size).astype("float32")
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
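# ForwardFoldScaleAxis removes a multiply by a constant per-channel scale that feeds a
# conv/dense by scaling the weight along its input-channel axis instead, dividing any
# intermediate bias by the scale so the result is unchanged (this is what the
# expected() functions below construct); a relu between the multiply and the conv is
# only transparent when the scale is a positive constant. A minimal numeric sketch of
# the identity for the dense case, not used by the tests themselves (the helper name
# is illustrative only):
def _forward_fold_identity_sketch():
    x = np.random.rand(2, 4).astype("float32")
    w = np.random.rand(3, 4).astype("float32")
    s = np.random.uniform(0.5, 1, size=(4,)).astype("float32")
    # Scaling the input features is the same as scaling the matching weight columns.
    np.testing.assert_allclose((x * s) @ w.T, x @ (w * s).T, rtol=1e-5)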
def test_fold_fwd_simple():
"""Simple testcase."""
def before(x, conv_weight, in_bias, in_scale, channels, blocking):
args = [x, conv_weight, in_bias]
x = relay.multiply(x, in_scale)
x = relay.nn.relu(x)
x = relay.add(x, in_bias)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW2i{}o".format(blocking[1]) if blocking else "OIHW",
)
return relay.Function(args, y)
def expected(x, conv_weight, in_bias, in_scale, in_channels, channels, blocking):
args = [x, conv_weight, in_bias]
if blocking:
squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3])
x = relay.nn.relu(x)
in_bias = relay.divide(
in_bias,
relay.reshape(squeezed_scale, (1, in_channels
)
x = relay.add(x, in_bias)
conv_weight = relay.multiply(
conv_weight, relay.reshape(squeezed_scale, (1, in_channels
)
else:
squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
x = relay.nn.relu(x)
in_bias = relay.divide(
in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
)
x = relay.add(x, in_bias)
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW2i{}o".format(blocking[1]) if blocking else "OIHW",
)
return relay.Function(args, y)
def check(shape, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
in_channels = shape[1] * shape[4]
in_bias = relay.var("in_bias", shape=(1, in_channels
in_scale = relay.const(
_get_positive_scale((1, in_channels
)
else:
in_channels = shape[1]
in_bias = relay.var("in_bias", shape=(in_channels, 1, 1))
in_scale = relay.const(_get_positive_scale((in_channels, 1, 1)))
y1 = before(x, weight, in_bias, in_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
y1_expected = expected(x, weight, in_bias, in_scale, in_channels, channels, blocking)
y1_folded = run_opt_pass(y1_folded, transform.InferType())
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 2, None)
check((2, 2, 10, 10, 2), 8, (2, 4))
def test_fold_fwd_dual_path():
"""scale axis being consumed by two consumers"""
def before(x, conv_weight, in_bias, in_scale, channels, blocking):
args = [x, conv_weight, in_bias]
x = relay.multiply(in_scale, x)
x = relay.nn.relu(x)
x = relay.subtract(x, in_bias)
y1 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
groups=channels,
padding=(1, 1),
)
y2 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
groups=channels,
padding=(1, 1),
)
z = relay.add(y1, y2)
return relay.Function(args, z)
def expected(x, conv_weight, in_bias, in_scale, channels, blocking):
args = [x, conv_weight, in_bias]
x = relay.nn.relu(x)
if blocking:
_in_scale = relay.reshape(
in_scale, (1, 1, 1, channels
)
else:
_in_scale = in_scale
in_bias = relay.divide(in_bias, _in_scale)
x = relay.subtract(x, in_bias)
if blocking:
_in_scale = relay.reshape(
in_scale, (1, 1, 1, channels
)
y1 = relay.nn.conv2d(
x,
relay.multiply(conv_weight, _in_scale),
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
groups=channels,
padding=(1, 1),
)
if blocking:
_in_scale = relay.reshape(
in_scale, (1, 1, 1, channels
)
y2 = relay.nn.conv2d(
x,
relay.multiply(conv_weight, _in_scale),
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "H |
WIO",
groups=channels,
padding=(1, 1),
)
z = relay.add(y1, y2)
return relay.Function(args, z)
def check(dshape, channels, blocking):
x = relay.var("x", shape=dshape)
if blocking:
in_channels = dshape[3] * dshape[4]
wshape = (3, 3, 1, channels
weight = relay.var("weight", shape=wshape)
in_bias = relay.var("in_bias", shape=(in_channels
in_scale = relay.const(_get_positive_scale((in_channels
else:
in_channels = dshape[-1]
wshape = (3, 3, 1, channels)
weight = relay.var("weight", shape=wshape)
in_bias = relay.var("in_bias", shape=(in_channels,))
in_scale = relay.const(
_get_positive_scale(
in_channels,
)
)
assert in_channels == channels
y1 = before(x, weight, in_bias, in_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_expected = expected(x, weight, in_bias, in_scale, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 3), 3, None)
check((2, 4, 10, 2, 2), 4, (2, 2))
def test_fold_fwd_fail():
"""testcase where we canont fold"""
def before(x, conv_weight, in_bias, in_scale, channels, blocking):
x = relay.multiply(x, in_scale)
xx = relay.nn.leaky_relu(x, alpha=0.1)
y1 = relay.nn.conv2d(
xx,
conv_weight,
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking el |
se "HWIO",
padding=(1, 1),
)
z = relay.add(y1, x)
return relay.Function(relay.analysis.free_vars(z), z)
def check(shape, channels, blocking):
x = relay.var("x", shape=shape)
if blocking:
in_channels = shape[3] * shape[4]
in_bias = relay.var("in_bias", shape=(in_channels
in_scale = relay.const(_get_positive_scale((in_channels
else:
in_channels = shape[-1]
in_bias = relay.var("in_bias", shape=(in_channels,))
in_scale = relay.const(_get_positive_scale(size=(in_channels,)))
assert in_channels == channels
weight = relay.var("weight")
y1 = before(x, weight, in_bias, in_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
assert tvm.ir.structural_equal(y1, y1_folded)
check((2, 11, 10, 4), 4, None)
check((2, 11, 10, 2, 2), 4, (2, 2))
def test_fold_fwd_relu_fail():
"""testcase where we canont fold because scale can not pass relu"""
def before(x, conv_weight, in_bias, in_scale, channels, blocking):
x = relay.multiply(x, in_scale)
xx = relay.nn.relu(x)
y1 = relay.nn.conv2d(
xx,
conv_weight,
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
padding=(1, 1),
)
z = relay.add(y1, x)
return relay.Function(relay.analysis.free_vars(z), z)
def check(shape, channels, blocking, in_scale):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
in_channels = shape[3] * shape[4]
in_bias = relay.var("in_bias", shape=(1, in_channels
else:
in_channels = shape[-1]
in_bias = relay.var("in_bias |
", shape=(in_channels,))
assert in_channels == channels
y1 = before(x, weight, in_bias, in_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
assert tvm.ir.structural_equal(y1, y1_folded)
in_scale = relay.var("in_scale", shape=(4,))
check((2, 11, 10, 4), 4, None, in_scale)
in_scale = relay.const(-_get_positive_scale((4,)))
check((2, 11, 10, 4), 4, None, in_scale)
in_scale = relay.var("in_scale", shape=(1, 1, 1, 2, 2))
check((2, 11, 10, 2, 2), 4, (2, 2), in_scale)
in_scale = relay.const(-_get_positive_scale((1, 1, 1, 2, 2)))
check((2, 11, 10, 2, 2), 4, (2, 2), in_scale)
def test_fold_fwd_let_fail():
"""testcase where we canont fold"""
def before(x, conv_weight, in_bias, in_scale, channels):
args = [x, conv_weight, in_bias]
x = relay.multiply(x, in_scale)
x = relay.nn.relu(x)
x = relay.add(x, in_bias)
x_var = relay.Var("x_var")
y1 = relay.nn.conv2d(
x_var,
conv_weight,
channels=channels,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
padding=(1, 1),
)
z = relay.add(y1, x)
let = relay.Let(x_var, x, z)
return relay.Function(args, let)
def check(shape, channels):
x = relay.var("x", shape=shape)
in_channels = shape[-1]
in_bias = relay.var("in_bias", shape=(in_channels,))
in_scale = relay.const(_get_positive_scale(size=(in_channels,)))
assert in_channels == channels
weight = relay.var("weight")
y1 = before(x, weight, in_bias, in_scale, channels)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
assert tvm.ir.structural_equal(y1, y1_folded)
check((2, 11, 10, 4), 4)
def test_fold_fwd_negative_scale():
"""Testcase of fol |
ding negative scale"""
def before(x, conv_weight, in_scale, channels, blocking):
args = [x, conv_weight]
x = relay.multiply(x, in_scale)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW4i{}o".format(blocking[1]) if blocking else "OIHW",
)
return relay.Function(args, y)
def expected(x, conv_weight, in_scale, in_channels, channels, blocking):
args = [x, conv_weight]
if blocking:
squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3])
conv_weight = relay.multiply(
conv_weight, relay.reshape(squeezed_scale, (1, in_channels
)
else:
squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW4i{}o".format(blocking[1]) if blocking else "OIHW",
)
return relay.Function(args, y)
def check(shape, channels, blocking):
x = relay.var("x", shape=shape)
if blocking:
in_channels = shape[1] * shape[4]
in_scale = relay.const(-_get_positive_scale((1, shape[1], 1, 1, shape[4])))
else:
in_channels = shape[1]
in_scale = relay.const(-_get_positive_scale((in_channels, 1, 1)))
weight = relay.var("weight")
y1 = before(x, weight, in_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
y1_expected = expected(x, weight, in_scale, in_channels, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 4, None)
check((2, 2, 10, 10, 2), 8, (2, 2))
def test_fold_fwd_dense():
"""dense testcase."""
def before(x, weight, in_bias, in_scale):
args = [x, weight, in_bias]
x = relay.multiply(x, in_scale)
x = relay.nn.relu(x)
x = relay.add(x, in_bias)
y = relay.nn.dense(x, weight)
return relay.Function(args, y)
def expected(x, weight, in_bias, in_scale):
args = [x, weight, in_bias]
x = relay.nn.relu(x)
in_bias = relay.divide(in_bias, in_scale)
x = relay.add(x, in_bias)
weight = relay.multiply(weight, in_scale)
y = relay.nn.dense(x, weight)
return relay.Function(args, y)
def check(data_shape, weight_shape):
x = relay.var("x", shape=data_shape)
weight = relay.var("weight", shape=weight_shape)
in_channels = data_shape[1]
in_bias = relay.var("in_bias", shape=(in_channels,))
in_scale = relay.const(_get_positive_scale((in_channels,)))
y1 = before(x, weight, in_bias, in_scale)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
y1_expected = expected(x, weight, in_bias, in_scale)
y1_folded = run_opt_pass(y1_folded, transform.InferType())
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4), (3, 4))
check((3, 5), (4, 5))
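# BackwardFoldScaleAxis works in the opposite direction: a multiply by a constant
# per-channel scale after conv (and after any bias add or relu) is folded back into
# the conv weight along the output-channel axis, with the bias multiplied by the same
# scale. relu is only transparent to positive scales, and folding is abandoned when
# the scaled value has other consumers or incompatible layouts (the *_fail tests).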
def test_fold_bwd_simple():
"""Simple testcase."""
def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
if blocking:
out_bias = relay.reshape(out_bias, (1, channels
else:
out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y = relay.add(y, out_bias)
y = relay.nn.relu(y)
if blocking:
out_scale = relay.reshape(out_scale, (1, channels
y = relay.multiply(y, out_scale)
return relay.Function(args, y)
def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
if blocking:
out_bias = relay.reshape(out_bias, (1, channels
out_scale = relay.reshape(out_scale, (1, channels
squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3])
conv_weight = relay.multiply(
conv_weight,
relay.reshape(squeezed_scale, (channels
)
else:
out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
if blocking:
out_bias = relay.multiply(
out_bias,
relay.reshape(squeezed_scale, (1, channels
) |
else:
out_bias = relay.multiply(
out_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
)
y = relay.add(y, out_bias)
y = relay.nn.relu(y)
return relay.Function(args, y)
def check(shape, in_channels, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
out_bias = relay.var("out_bias", shape=(channels,))
if blocking:
out_scale = relay.const(_get_positive_scale((channels,)))
else:
out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 4, 8, None)
check((2, 2, 10, 10, 16), 32, 64, (16, 16))
def test_fold_bwd_dual_path():
"""Dual path testcase."""
def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
y1 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if |
blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y2 = relay.nn.relu(y2)
y = relay.add(y1, y2)
y = relay.multiply(y, out_scale)
return relay.Function(args, y)
def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
if not blocking:
out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
def fold_conv_weight():
if blocking:
return relay.multiply(
conv_weight,
relay.reshape(
squeezed_scale, (channels
),
)
else:
return relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y1 = relay.nn.conv2d(
x,
fold_conv_weight(),
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(
x,
fold_conv_weight(),
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y2 = relay.nn.relu(y2)
y = relay.add(y1, y2)
return relay.Function(args, y)
def check(shape, in_channels, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
out_bias = relay.var("out_bias", shape=(channels
out_scale = relay.const( |
_get_positive_scale((channels
)
else:
out_bias = relay.var("out_bias", shape=(channels,))
out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 4, 8, None)
check((2, 2, 10, 10, 2), 4, 8, (2, 2))
def test_fold_bwd_simple_constant():
def before(data, weight, out_bias, channels):
y = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
y = relay.add(y, out_bias)
c2 = relay.const(2.0)
y = relay.nn.relu(y)
y = relay.multiply(y, c2)
mod, params = create_workload(y, initializer)
mod["main"] = bind_params_by_name(mod["main"], params)
return mod
def expected(data, weight, out_bias, channels):
y0 = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
y0 = relay.add(y0, out_bias)
y0 = relay.nn.relu(y0)
mod, params = create_workload(y0, initializer)
mod["main"] = bind_params_by_name(mod["main"], params)
return mod
def check(shape, channels):
x = relay.var("data", relay.TensorType(shape, "float32"))
weight = relay.var("weight")
out_bias = relay.var("in_bias", shape=(channels, 1, 1))
y0 = before(x, weight, out_bias, channels)
remove_last_multiply = tvm.transform.Sequential(
[ |
relay.transform.InferType(),
relay.transform.FoldScaleAxis(),
]
)
with tvm.transform.PassContext(opt_level=3):
y0 = remove_last_multiply(y0)
_expect = expected(x, weight, out_bias, channels)
tvm.ir.assert_structural_equal(y0, _expect)
check((1, 3, 200, 200), 16)
def test_fold_bwd_dual_consumer():
def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
y0 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y0 = relay.multiply(y0, out_scale)
y0 = relay.nn.relu(y0)
y1 = relay.nn.conv2d(
y0,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y1 = relay.multiply(y1, out_scale)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(
y0,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y2 = relay.multiply(y2, out_scale)
y2 = relay.nn.relu(y2)
y = relay.add(y1, y2)
return relay.Function(args, y)
def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
def fold_conv_weight():
squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
if blocking:
return relay.multiply(
conv_weight,
relay.reshape(
squeezed_scale, (channels
),
)
else:
return relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y0 = relay.nn.conv2d(
x,
fold_conv_weight(),
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y0 = relay.nn.relu(y0)
y1 = relay.nn.conv2d(
y0,
fold_conv_weight(),
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(
y0,
fold_conv_weight(),
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y2 = relay.nn.relu(y2)
y = relay.add(y1, y2)
return relay.Function(args, y)
def check(shape, in_channels, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
out_bias = relay.var("out_bias", shape=(channels
out_scale = relay.const(
_get_positive_scale((channels
)
else:
out_bias = relay.var("out_bias", shape=(channels,))
out_scale = relay.const(_get_positive_scale((channels, 1, 1))) |
y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 4, 4, None)
check((2, 2, 10, 10, 2), 4, 4, (2, 2))
def test_fold_bwd_fail():
"""Dual path testcase."""
def fail1(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
y1 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
out_layout="CNHW{}c".format(blocking[1]) if blocking else "CNHW",
)
y2 = relay.nn.relu(y2)
y = relay.add(y1, y2)
y = relay.multiply(y, out_scale)
return relay.Function(args, y)
def fail2(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
y1 = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y2 = relay.nn.relu(y1)
y1 = relay.multiply(y1, out_scale)
y = relay.add(y1, y2)
return relay.Function(args, y)
def check(shape, in_channels, channels, blocking, fbefore):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
out_bias = relay.var("out_bias", shape=(channels
out_scale = relay.const(
_get_positive_scale((channels
)
else:
out_bias = relay.var("out_bias", shape=(channels, 1, 1))
out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
y1 = fbefore(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
assert tvm.ir.structural_equal(y1_folded, y1)
check((4, 4, 10, 10), 4, 4, None, fail1)
check((2, 2, 10, 10, 2), 4, 4, (2, 2), fail1)
check((4, 4, 10, 10), 4, 4, None, fail2)
check((4, 2, 10, 10, 2), 4, 4, (2, 2), fail2)
def test_fold_bwd_relu_fail():
"""testcase where we canont fold because scale can not pass relu"""
def before(x, conv_weight, out_scale, channels, blocking):
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y = relay.nn.relu(y)
y = relay.multiply(x, out_scale)
return relay.Function(relay.analysis.free_vars(y), y)
def check(shape, channels, blocking, out_scale):
x = relay.var("x", shape=shape)
in_channels = shape[1]
weight = relay.var("weight")
y1 = before(x, weight, out_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
assert tvm.ir.structural_equal(y1, y1_folded)
out_scale = relay.var("in_scale", shape=(4, 1, 1))
check((4, 4, 10, 10), 4, None, out_scale)
out_scale = relay.const(np.random.uniform(size=(4, 1, 1), low=-1.0, high=0.0)).astype("float32")
check((4, 4, 10, 10), 4, None, out_scale)
out_scale = relay.var("in_scale", shape=(1, 2, 1, 1, 2))
check((4, 2, 10, 10, 2), 4, (2, 2), out_scale)
out_scale = relay.const(np.random.uniform(size=(1, 2, 1, 1, 2), low=-1.0, high=0.0)).astype(
"float32"
)
check((4, 2, 10, 10, 2), 4, (2, 2), out_scale)
def test_fold_bwd_negative_scale():
"""Testcase of folding negative scale"""
def before(x, conv_weight, out_scale, channels, blocking):
args = [x, conv_weight]
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
y = relay.multiply(y, out_scale)
return relay.Function(args, y)
def expected(x, conv_weight, out_scale, channels, blocking):
args = [x, conv_weight]
if blocking:
squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3])
conv_weight = relay.multiply(
conv_weight,
relay.reshape(squeezed_scale, (channels
)
else:
squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels, |
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
)
return relay.Function(args, y)
def check(shape, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
out_scale = relay.const(
-_get_positive_scale((1, channels
)
else:
out_scale = relay.const(-_get_positive_scale((channels, 1, 1)))
y1 = before(x, weight, out_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, out_scale, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 8, None)
check((2, 2, 10, 10, 2), 8, (2, 2))
def test_fold_bwd_dense():
"""dense testcase."""
def before(x, weight, in_bias, in_scale):
args = [x, weight, in_bias]
x = relay.nn.dense(x, weight)
x = relay.add(x, in_bias)
x = relay.nn.relu(x)
y = relay.multiply(x, in_scale)
return relay.Function(args, y)
def expected(x, weight, in_bias, in_scale):
args = [x, weight, in_bias]
scale = relay.expand_dims(in_scale, axis=1)
weight = relay.multiply(weight, scale)
x = relay.nn.dense(x, weight)
bias = relay.multiply(in_bias, in_scale)
x = relay.add(x, bias)
y = relay.nn.relu(x)
return relay.Function(args, y)
def check(data_shape, weight_shape):
x = relay.var("x", shape=data_shape)
weight = relay.var("weight", shape=weight_shape) |
out_channels = weight_shape[0]
in_bias = relay.var("in_bias", shape=(out_channels,))
in_scale = relay.const(_get_positive_scale((out_channels,)))
y1 = before(x, weight, in_bias, in_scale)
y1 = run_opt_pass(y1, transform.InferType())
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, in_bias, in_scale)
y1_folded = run_opt_pass(y1_folded, transform.InferType())
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4), (3, 4))
check((3, 5), (4, 5))
def test_fold_bwd_bias_add():
"""bias add testcase."""
def before(x, conv_weight, out_bias, out_scale, channels):
args = [x, conv_weight, out_bias]
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.bias_add(y, out_bias)
y = relay.nn.relu(y)
y = relay.multiply(y, out_scale)
return relay.Function(args, y)
def expected(x, conv_weight, out_bias, out_scale, channels):
args = [x, conv_weight, out_bias]
squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y = relay.nn.conv2d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
out_bias = relay.multiply(out_bias, squeezed_scale)
y = relay.nn.bias_add(y, out_bias)
y = relay.nn.relu(y)
return relay.Function(args, y)
def check(shape, channels):
x = relay.var("x", shape=shape)
weight = relay.var("weight") |
out_bias = relay.var("out_bias", shape=(channels,))
out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
y1 = before(x, weight, out_bias, out_scale, channels)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, out_bias, out_scale, channels)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10), 4)
def test_fold_fwd_conv3d():
"""Conv3d testcase."""
def before(x, conv_weight, in_bias, in_scale, channels, blocking):
args = [x, conv_weight, in_bias]
x = relay.multiply(x, in_scale)
x = relay.nn.relu(x)
x = relay.add(x, in_bias)
y = relay.nn.conv3d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
kernel_layout="OIDHW2i{}o".format(blocking[1]) if blocking else "OIDHW",
)
return relay.Function(args, y)
def expected(x, conv_weight, in_bias, in_scale, in_channels, channels, blocking):
args = [x, conv_weight, in_bias]
if blocking:
squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3, 4])
x = relay.nn.relu(x)
in_bias = relay.divide(
in_bias,
relay.reshape(
squeezed_scale, (1, in_channels
),
)
x = relay.add(x, in_bias)
conv_weight = relay.multiply(
conv_weight, relay.reshape(squeezed_scale, (1, in_channels
)
else:
squeezed_scale = relay.squeeze(in_scale, axis=[1, 2, 3])
x = relay.nn.relu(x)
in_bias = relay.divide(
in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
x = relay.add(x, in_bias)
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y = relay.nn.conv3d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
kernel_layout="OIDHW2i{}o".format(blocking[1]) if blocking else "OIDHW",
)
return relay.Function(args, y)
def check(shape, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
if blocking:
in_channels = shape[1] * shape[-1]
in_bias = relay.var(
"in_bias", shape=(1, in_channels
)
in_scale = relay.const(
_get_positive_scale((1, in_channels
)
else:
in_channels = shape[1]
in_bias = relay.var("in_bias", shape=(in_channels, 1, 1, 1))
in_scale = relay.const(_get_positive_scale((in_channels, 1, 1, 1)))
y1 = before(x, weight, in_bias, in_scale, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
y1_expected = expected(x, weight, in_bias, in_scale, in_channels, channels, blocking)
y1_folded = run_opt_pass(y1_folded, transform.InferType())
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10, 10), 2, None)
check((2, 2, 10, 10, 10, 2), 8, (2, 4))
def test_fold_bwd_conv3d():
"""Conv3d testcas |
e."""
def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
if blocking:
out_bias = relay.reshape(out_bias, (1, channels
else:
out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=3)
y = relay.nn.conv3d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
kernel_layout="OIDHW1i{}o".format(blocking[1]) if blocking else "OIDHW",
)
y = relay.add(y, out_bias)
y = relay.nn.relu(y)
if blocking:
out_scale = relay.reshape(out_scale, (1, channels
y = relay.multiply(y, out_scale)
return relay.Function(args, y)
def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
args = [x, conv_weight, out_bias]
if blocking:
out_bias = relay.reshape(out_bias, (1, channels
out_scale = relay.reshape(out_scale, (1, channels
squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3, 4])
conv_weight = relay.multiply(
conv_weight,
relay.reshape(
squeezed_scale, (channels
),
)
else:
out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=3)
squeezed_scale = relay.squeeze(out_scale, axis=[1, 2, 3])
conv_weight = relay.multiply(
conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=4)
)
y = relay.nn.conv3d(
x,
conv_weight,
channels=channels,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
kernel_layout="OIDHW1i{}o".format(blocking[1]) if blocking else "OIDHW", |
)
if blocking:
out_bias = relay.multiply(
out_bias,
relay.reshape(squeezed_scale, (1, channels
)
else:
out_bias = relay.multiply(
out_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
)
y = relay.add(y, out_bias)
y = relay.nn.relu(y)
return relay.Function(args, y)
def check(shape, in_channels, channels, blocking):
x = relay.var("x", shape=shape)
weight = relay.var("weight")
out_bias = relay.var("out_bias", shape=(channels,))
if blocking:
out_scale = relay.const(_get_positive_scale((channels,)))
else:
out_scale = relay.const(_get_positive_scale((channels, 1, 1, 1)))
y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1 = run_opt_pass(y1, transform.InferType())
type_dict = {x.name_hint: x.checked_type for x in y1.params}
weight = relay.var("weight", type_dict["weight"])
y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
y1_expected = run_opt_pass(y1_expected, transform.InferType())
assert tvm.ir.structural_equal(y1_folded, y1_expected)
check((2, 4, 10, 10, 10), 4, 8, None)
check((2, 2, 10, 10, 10, 16), 32, 64, (16, 16))
if __name__ == "__main__":
test_fold_fwd_simple()
test_fold_fwd_dual_path()
test_fold_fwd_fail()
test_fold_fwd_relu_fail()
test_fold_fwd_negative_scale()
test_fold_fwd_dense()
test_fold_bwd_simple_constant()
test_fold_bwd_simple()
test_fold_bwd_dual_path()
test_fold_bwd_dual_consumer()
test_fold_bwd_fail()
test_fold_bwd_relu_fail()
test_fold_bwd_negative_scale()
test_fold_bwd_dense()
test_fold_bwd_bias_add()
test_fold_fwd_conv3d()
test_fold_bwd_conv3d() |
import numpy as np |
import pytest |