y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
y = relay.nn.layer_norm(y, gamma, beta, axis=1)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
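# ConvertLayout rewrites conv2d to the requested NCHW data layout; "default" lets the op pick its
# canonical kernel layout for that data layout (OIHW here), inserting layout_transform ops at the boundaries.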
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_InstanceNorm_convert_layout():
"""Check that layout transforms are propagated through instance norm."""
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
y = relay.nn.instance_norm(y, gamma, beta, axis=3)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
y = relay.nn.instance_norm(y, gamma, beta, axis=1)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_bn_convert_layout():
"""Check that layout transforms are propagated through bn."""
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((64,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((64,), dtype))
y = relay.nn.batch_norm(y, gamma, beta, moving_mean, moving_var, axis=3)
y = relay.nn.relu(y[0])
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((64,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((64,), dtype))
y = relay.nn.batch_norm(y, gamma, beta, moving_mean, moving_var, axis=1)
y = relay.nn.relu(y[0])
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_requantize_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.qnn.op.requantize(
y,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
out_dtype="int32",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.qnn.op.requantize(
y,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=1,
out_dtype="int32",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_concat_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.concatenate(
[y, y1],
[relay.const(1, "float32"), relay.const(1, "float32")],
[relay.const(1, "int32"), relay.const(1, "int32")],
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=3,
)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.qnn.op.conv2d(
y,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.concatenate(
[y, y1],
[relay.const(1, "float32"), relay.const(1, "float32")],
[relay.const(1, "int32"), relay.const(1, "int32")],
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=1,
)
ret = relay.layout_transform(ret, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_add_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.add(
y,
y1,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.qnn.op.conv2d(
y,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.add(
y,
y1,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
)
ret = relay.layout_transform(ret, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_nhwc_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_transpose_requantize_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d_transpose(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
y = relay.qnn.op.requantize(
y,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
out_dtype="int32",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "IOHW")
y = relay.qnn.op.conv2d_transpose(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
out_dtype="int32",
)
y = relay.qnn.op.requantize(
y,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=1,
out_dtype="int32",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d_transpose": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_convert_kernel_layout():
"""Check that convolution kernel layout is correctly transformed."""
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
w = relay.layout_transform(w, "HWIO", "OHWI")
y = relay.nn.conv2d(
x,
w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="OHWI",
)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_roi_align_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NHWC"
)
ret = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
"vision.roi_align": ["NHWC", "default"],
}
a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_strided_slice_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.strided_slice(y, begin=[0, 1], end=[1, -1, 10], strides=[1, 1, 2, 1])
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.strided_slice(y, begin=[0, 0, 0, 1], end=[1, 10, 56, -1], strides=[1, 2, 1, 1])
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_split_convert_layout():
def _test_conv_split_convert_layout1():
def before():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
y = relay.nn.conv2d(
x,
weight,
channels=512,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=2, axis=-1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
def expected():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=2, axis=1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
a = relay.layout_transform(a, "NCHW", "NHWC")
b = relay.layout_transform(b, "NCHW", "NHWC")
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_conv_split_convert_layout2():
def before():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
y = relay.nn.conv2d(
x,
weight,
channels=512,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=2, axis=3).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
def expected():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=2, axis=1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
a = relay.layout_transform(a, "NCHW", "NHWC")
b = relay.layout_transform(b, "NCHW", "NHWC")
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_conv_split_convert_layout3():
def before():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
y = relay.nn.conv2d(
x,
weight,
channels=512,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=(5, 10), axis=-1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
c = relay.TupleGetItem(y, 2)
out = relay.Tuple([a, b, c])
return relay.Function(analysis.free_vars(out), out)
def expected():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=(5, 10), axis=1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
c = relay.TupleGetItem(y, 2)
a = relay.layout_transform(a, "NCHW", "NHWC")
b = relay.layout_transform(b, "NCHW", "NHWC")
c = relay.layout_transform(c, "NCHW", "NHWC")
out = relay.Tuple([a, b, c])
return relay.Function(analysis.free_vars(out), out)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_conv_split_convert_layout_blocking():
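# NCHW4c/OIHW4o are blocked layouts: the channel axis is tiled by 4, so the split index
# along axis 1 shrinks from 256 in NCHW to 64 in NCHW4c (256 / 4).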
def before():
x = relay.var("x", shape=(1, 512, 38, 38))
weight = relay.var("weight", shape=(512, 512, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=512,
kernel_size=(3, 3),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=[256], axis=1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
def expected():
x = relay.var("x", shape=(1, 512, 38, 38))
weight = relay.var("weight", shape=(512, 512, 3, 3))
weight = relay.layout_transform(weight, "OIHW", "OIHW4o")
x = relay.layout_transform(x, "NCHW", "NCHW4c")
y = relay.op.nn.contrib_conv2d_nchwc(
x,
weight,
channels=512,
kernel_size=(3, 3),
padding=(0, 0),
data_layout="NCHW4c",
kernel_layout="OIHW4o",
)
y = relay.nn.relu(y)
y = relay.op.split(y, indices_or_sections=[64], axis=1).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
a = relay.layout_transform(a, "NCHW4c", "NCHW")
b = relay.layout_transform(b, "NCHW4c", "NCHW")
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW4c", "OIHW4o"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
_test_conv_split_convert_layout1()
_test_conv_split_convert_layout2()
_test_conv_split_convert_layout3()
_test_conv_split_convert_layout_blocking()
def test_conv_strided_slice_axes_convert_layout():
def before():
x = relay.var("x", shape=(1, 28, 28, 32))
weight = relay.var("weight", shape=(3, 3, 32, 32))
y = relay.nn.conv2d(
x,
weight,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.strided_slice(y, begin=[0, 16], end=[1, 33], strides=[1, 1], axes=[0, 3])
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 28, 28, 32))
weight = relay.var("weight", shape=(3, 3, 32, 32))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(
x,
weight,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.strided_slice(y, begin=[0, 16], end=[1, 33], strides=[1, 1], axes=[0, 1])
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_topk_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.topk(y, k=2, axis=2)
if isinstance(y, relay.expr.TupleWrapper):
y = y.astuple()
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.topk(y, k=2, axis=3).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
a = relay.layout_transform(a, "NCHW", "NHWC")
b = relay.layout_transform(b, "NCHW", "NHWC")
out = relay.Tuple([a, b])
return relay.Function(analysis.free_vars(out), out)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_roi_pool_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_pool(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_pool(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, layout="NHWC"
)
ret = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
"vision.roi_pool": ["NHWC", "default"],
}
a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_default_keyword():
"""Check that the default keyword selects correct TVM default layout."""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 3, 3, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
w = relay.var("weight", shape=(64, 3, 3, 64))
w = relay.layout_transform(w, "OHWI", "OIHW")
y = relay.nn.conv2d(
x,
w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_different_ops_convert_layout():
"""Check convert layout correctly supports converting the layout of
different ops in the same graph.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
out = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
out = relay.cast(out, "int8")
out = relay.qnn.op.conv2d(
out,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
out = relay.cast(out, "float32")
out = relay.nn.conv2d_transpose(
out,
weight3,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
out = relay.Function(analysis.free_vars(out), out)
return out
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OHWI", "HWIO")
out = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
out = relay.cast(out, "int8")
out = relay.layout_transform(out, "NHWC", "NCHW")
weight2 = relay.layout_transform(weight2, "OHWI", "OIHW")
out = relay.qnn.op.conv2d(
out,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
out = relay.cast(out, "float32")
out = relay.layout_transform(out, "NCHW", "NHWC")
weight3 = relay.layout_transform(weight3, "OHWI", "HWIO")
out = relay.nn.conv2d_transpose(
out,
weight3,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
out = relay.layout_transform(out, "NHWC", "NCHW")
out = relay.Function(analysis.free_vars(out), out)
return out
a = before()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
"qnn.conv2d": ["NCHW", "OIHW"],
"nn.conv2d_transpose": ["NHWC", "HWIO"],
}
a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_no_desired_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.layout_transform(y, "NHWC", "NCHW")
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "HWIO"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_convert_with_config():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
y2 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y2 = relay.nn.relu(y2)
out = relay.Function([x, weight, weight2], y2)
return out
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
weight2 = relay.layout_transform(weight2, "HWIO", "HWOI")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NHWC", "HWNC")
y2 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="HWNC",
kernel_layout="HWOI",
)
y2 = relay.nn.relu(y2)
y2 = relay.layout_transform(y2, "HWNC", "NHWC")
output = relay.Function(relay.analysis.free_vars(y2), y2)
return output
a = before()
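# skip_layers=[0] tells ConvertLayout to leave the first conv2d in its original NHWC/HWIO layout;
# only the second conv is converted to the requested HWNC/HWOI layout.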
layout_config = relay.transform.LayoutConfig(skip_layers=[0])
with layout_config:
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["HWNC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_squeeze_convert_layout():
def _test_conv_squeeze_convert_layout1():
def before():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
y = relay.nn.conv2d(
x,
weight,
channels=1000,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.squeeze(y, axis=[-3])
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
y = relay.nn.relu(y)
y = relay.squeeze(y, axis=[2])
y = relay.layout_transform(y, "NCW", "NWC")
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_conv_squeeze_convert_layout2():
def before():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
y = relay.nn.conv2d(
x,
weight,
channels=1000,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.squeeze(y)
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
y = relay.nn.relu(y)
y = relay.squeeze(y, [0, 2, 3])
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_conv_squeeze_convert_layout3():
def before():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
y = relay.nn.conv2d(
x,
weight,
channels=1000,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.squeeze(y, axis=[])
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
y = relay.nn.relu(y)
y = relay.squeeze(y, axis=[])
y = relay.layout_transform(y, "NCHW", "NHWC")
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
_test_conv_squeeze_convert_layout1()
_test_conv_squeeze_convert_layout2()
_test_conv_squeeze_convert_layout3()
def test_conv_reduce_convert_layout():
def _test_conv_reduce_convert_layout1():
def before():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
y = relay.nn.conv2d(
x,
weight,
channels=1000,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.sum(y, axis=(1, 2))
y = relay.sum(y, axis=(1,))
y = relay.sum(y)
y = relay.sum(y)
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 1, 1, 2048))
weight = relay.var("weight", shape=(1, 1, 2048, 1000))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
y = relay.nn.relu(y)
y = relay.sum(y, axis=(2, 3))
y = relay.sum(y, axis=(1,))
y = relay.sum(y)
y = relay.sum(y)
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_conv_reduce_convert_layout2():
def _set_span(y, text):
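# Rebuild the call with a Span attached, so the test can check that source spans survive ConvertLayout.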
return relay.Call(
y.op, y.args, y.attrs, y.type_args, relay.Span(relay.SourceName(text), 0, 0, 0, 0)
)
def before():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
y = relay.nn.conv2d(
x,
weight,
channels=512,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = _set_span(y, "SpanConv2D")
y = relay.nn.relu(y)
y = _set_span(y, "SpanRelu")
y = relay.multiply(y, y)
y = _set_span(y, "SpanMultiply")
y = relay.sum(y, axis=(3,), keepdims=True)
y = _set_span(y, "SpanSum")
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 38, 38, 512))
weight = relay.var("weight", shape=(3, 3, 512, 512))
weight = relay.layout_transform(weight, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
y = relay.nn.relu(y)
y = relay.multiply(y, y)
y = relay.sum(y, axis=(1,), keepdims=True)
y = relay.layout_transform(y, "NCHW", "NHWC")
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
assert "SpanConv2D" in a.astext()
assert "SpanRelu" in a.astext()
assert "SpanMultiply" in a.astext()
assert "SpanSum" in a.astext()
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
_test_conv_reduce_convert_layout1()
_test_conv_reduce_convert_layout2()
def test_image_resize2d_convert_layout():
def _test_image_resize_convert_layout_nchw_to_nhwc():
def before():
x = relay.var("x", shape=(1, 2, 4, 4))
y = relay.image.resize2d(x, (8, 8))
y = relay.Function([x], y)
return y
def expected():
x = relay.var("x", shape=(1, 2, 4, 4))
x = relay.layout_transform(x, "NCHW", "NHWC")
y = relay.image.resize2d(x, (8, 8), layout="NHWC")
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"image.resize2d": ["NHWC"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def _test_image_resize_convert_layout_nhwc_to_nchw():
def before():
x = relay.var("x", shape=(1, 4, 4, 2))
y = relay.image.resize2d(x, (8, 8), layout="NHWC")
y = relay.Function([x], y)
return y
def expected():
x = relay.var("x", shape=(1, 4, 4, 2))
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.image.resize2d(x, (8, 8), layout="NCHW")
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"image.resize2d": ["NCHW"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
_test_image_resize_convert_layout_nchw_to_nhwc()
_test_image_resize_convert_layout_nhwc_to_nchw()
def test_conv_image_resize2d_convert_layout():
"""Check that layout transforms are propagated through image resize."""
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.image.resize2d(y, (112, 112), layout="NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.image.resize2d(y, (112, 112), layout="NCHW")
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_infer_correct_layout():
test_infer_correct_layout_flag = False
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
@reg.register_infer_correct_layout("nn.relu", level=11)
def infer_correct_layout_relu(attrs, new_in_layouts, old_in_layouts, old_in_types):
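# Custom InferCorrectLayout hook for nn.relu: adopt the first new input layout (or the first
# defined old layout) for every input and for the output, and flag that the hook was invoked.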
nonlocal test_infer_correct_layout_flag
test_infer_correct_layout_flag = True
ret = tvm.tir.layout("")
if new_in_layouts:
assert len(new_in_layouts) >= 1
ret = new_in_layouts[0]
else:
for i in range(len(old_in_layouts)):
if old_in_layouts[i]:
ret = old_in_layouts[i]
break
input_layouts = []
for i in range(len(old_in_layouts)):
input_layouts.append(ret)
return InferCorrectLayoutOutput(input_layouts, [ret], attrs)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
assert test_infer_correct_layout_flag == True
def test_reduce_op_convert_layout():
for reduce_op in [relay.argmax, relay.mean, relay.max]:
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = reduce_op(y, axis=[2, 3])
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = reduce_op(y, axis=[1, 2])
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_max_pool_uses_specified_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "OHWI")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="OHWI",
)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC", out_layout="NHWC")
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(
a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"], "nn.max_pool2d": ["NHWC"]})
)
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
@pytest.mark.parametrize(
"data_layout, kernel_layout",
[
("NCHW1c", "OIHW1i1o"),
("NCHW4c", "OIHW4i4o"),
("NCHW8c", "OIHW8i8o"),
("NCHW16c", "OIHW16i16o"),
],
)
def test_resnet_convert_layout_nchwc(data_layout, kernel_layout):
x = relay.var("x", shape=(1, 3, 224, 224))
weight1 = relay.var("weight1", shape=(64, 3, 7, 7))
weight2 = relay.var("weight2", shape=(64, 64, 3, 3))
weight3 = relay.var("weight3", shape=(64, 64, 1, 1))
def before():
y = relay.nn.conv2d(
x,
weight1,
strides=(2, 2),
padding=(3, 3),
channels=64,
kernel_size=(7, 7),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
y1 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(
y,
weight3,
channels=64,
kernel_size=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y2 = relay.nn.relu(y2)
y = y1 + y2
y = relay.nn.global_max_pool2d(y, layout="NCHW")
return y
def expected():
if data_layout == "NCHW1c":
y = relay.nn.contrib_conv2d_nchwc(
relay.layout_transform(x, "NCHW", data_layout),
relay.layout_transform(weight1, "OIHW", kernel_layout),
strides=(2, 2),
padding=(3, 3),
channels=64,
kernel_size=(7, 7),
data_layout=data_layout,
kernel_layout=kernel_layout,
)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(
y, pool_size=(3, 3), strides=(2, 2), padding=(1, 1), layout=data_layout
)
else:
y = relay.nn.conv2d(
x,
weight1,
strides=(2, 2),
padding=(3, 3),
channels=64,
kernel_size=(7, 7),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
y = relay.layout_transform(y, "NCHW", data_layout)
y1 = relay.nn.contrib_conv2d_nchwc(
y,
relay.layout_transform(weight2, "OIHW", kernel_layout),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout=data_layout,
kernel_layout=kernel_layout,
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.contrib_conv2d_nchwc(
y,
relay.layout_transform(weight3, "OIHW", kernel_layout),
channels=64,
kernel_size=(1, 1),
data_layout=data_layout,
kernel_layout=kernel_layout,
)
y2 = relay.nn.relu(y2)
y = y1 + y2
y = relay.nn.global_max_pool2d(y, layout=data_layout)
y = relay.layout_transform(y, data_layout, "NCHW")
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": [data_layout, kernel_layout]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n Expect = \n" + str(b)
def test_conv_l2n_convert_layout():
"""Check that layout transforms are propagated through bn."""
axis_list = ([3], [-1], [2, 3])
expected_axis = ([1], [1], [3, 1])
for i, axis in enumerate(axis_list):
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
z = relay.nn.l2_normalize(y, eps=0.001, axis=axis)
z = relay.Function(analysis.free_vars(z), z)
return z
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
z = relay.nn.l2_normalize(y, eps=0.001, axis=expected_axis[i])
z = relay.layout_transform(z, "NCHW", "NHWC")
z = relay.Function(analysis.free_vars(z), z)
return z
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
if __name__ == "__main__":
pytest.main([__file__])
import tvm
import tvm.testing
from tvm.relay import Function, transform
from tvm.relay.testing import inception_v3
import pytest
cpu_scope = tvm.target.VirtualDevice(tvm.cpu(), tvm.target.Target("llvm"))
metatable = {"VirtualDevice": [cpu_scope]}
core = tvm.IRModule()
core.import_from_std("core.rly")
def optimize_and_check(before_program, after_program, passes):
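# Parse textual Relay if needed, run the given pass(es), and assert the optimized module is
# structurally equal to the expected program.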
if isinstance(before_program, str):
before_program = tvm.parser.parse(before_program)
if isinstance(after_program, str):
after_program = tvm.parser.parse(after_program)
if not isinstance(passes, list):
passes = [passes]
optimize = tvm.transform.Sequential(passes)
optimized_program = optimize(before_program)
print("Actual:")
print(optimized_program)
print("Expected:")
print(after_program)
assert tvm.ir.structural_equal(optimized_program, after_program, map_free_vars=True)
def test_dead_let():
before_program = """
def @main(%z: int) {
let %x = 1;
%z
}
"""
after_program = """
def @main(%z: int) {
%z
}
"""
optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_one_live_let():
before_program = """
def @main(%z: int) {
let %x = 1;
let %y = 2;
%x + %x
}
"""
after_program = """
def @main(%z: int) {
let %x = 1;
%x + %x
}
"""
optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_nested_let():
before_program = """
def @main(%d: int, %b: int) {
let %a = %b;
let %c = %d;
%c
}
"""
after_program = """
def @main(%d: int, %b: int) {
let %c = %d;
%c
}
"""
optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_live_recursion():
before_program = """
def @main() {
let %f = fn (%n: int, %data: int) -> int {
if (%n == 0) {
%data
} else {
%f(%n - 1, log(%data))
}
};
%f(2, 10000)
}
"""
after_program = """
def @main() {
let %f = fn (%n: int, %data: int) -> int {
if (%n == 0) {
%data
} else {
%f(%n - 1, log(%data))
}
};
%f(2, 10000)
}
"""
optimize_and_check(
before_program, after_program, [transform.DeadCodeElimination(), transform.InferType()]
)
def test_dead_recursion():
before_program = """
def @main() {
let %f = fn (%n: int, %data: int) -> int {
if (%n == 0) {
%data
} else {
%f(%n - 1, log(%data))
}
};
()
}
"""
after_program = """
def @main() {
()
}
"""
optimize_and_check(
before_program, after_program, [transform.DeadCodeElimination(), transform.InferType()]
)
def test_add_with_let():
before_program = """
def @main() {
(let %a = 1; 3) + 2
}
"""
after_program = """
def @main() {
3 + 2
}
"""
optimize_and_check(
before_program, after_program, [transform.DeadCodeElimination(), transform.InferType()]
)
def test_tuple_get_item():
before_program = """
def @main() {
let %a = 100;
(1, 2, 3, 4).0
}
"""
after_program = """
def @main() {
(1, 2, 3, 4).0
}
"""
optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_inline_into_function():
"""Don't inline across function boundaries."""
before_program = """
def @main() {
let %x = 1 + 1;
let %f = fn (%y: int) -> int {
let %z = %y + %y;
%x + %z
};
(%f(2), %f(3))
}
"""
after_program = """
def @main() {
let %x = 1 + 1;
let %f = fn (%y: int) -> int {
%x + (%y + %y)
};
(%f(2), %f(3))
}
"""
optimize_and_check(
before_program, after_program, transform.DeadCodeElimination(inline_once=True)
)
def test_impure_op():
"""Don't elide calls to side-effecting operators."""
before_program = tvm.parser.parse(
"""
def @main() {
let %size: int64 = cast(1024, dtype="int64");
let %alignment: int64 = cast(64, dtype="int64");
let %x = memory.alloc_storage(%size, %alignment, virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%x);
0
}
""",
"from_string",
core,
metatable,
)
after_program = tvm.parser.parse(
"""
def @main() {
%0 = memory.alloc_storage(cast(1024, dtype="int64"),
cast(64, dtype="int64"),
virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%0);
0
}
""",
"from_string",
core,
metatable,
)
optimize_and_check(
before_program, after_program, transform.DeadCodeElimination(inline_once=True)
)
def test_impure_func():
"""Don't elide calls to side-effecting functions."""
before_program = tvm.parser.parse(
"""
def @f() -> int {
let %size: int64 = cast(1024, dtype="int64");
let %alignment: int64 = cast(64, dtype="int64");
let %x = memory.alloc_storage(%size, %alignment, virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%x);
0
}
def @main() -> int {
let %y = @f();
0
}
""",
"from_string",
core,
metatable,
)
after_program = tvm.parser.parse(
"""
def @f() -> int {
%0 = memory.alloc_storage(cast(1024, dtype="int64"),
cast(64, dtype="int64"),
virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%0);
0
}
def @main() -> int {
let %y = @f();
0
}
""",
"from_string",
core,
metatable,
)
optimize_and_check(
before_program, after_program, transform.DeadCodeElimination(inline_once=True)
)
def test_refs():
"""Don't elide expressions with reference create/read/write side effects"""
before_program = """
def @f(%r) -> int {
let %v = ref_read(%r);
let %u = ref_write(%r, %v + 1);
%v
}
def @main() -> int {
let %r = ref(0);
let %y = @f(%r);
let %z = @f(%r);
%z
}
"""
after_program = before_program
optimize_and_check(
before_program,
after_program,
[transform.InferType(), transform.DeadCodeElimination(inline_once=True)],
)
def test_complexity():
mod = transform.InferType()(
tvm.IRModule.from_expr(inception_v3.get_net(1, 1000, (3, 299, 299), "float32"))
)
optimize_and_check(mod, mod, transform.DeadCodeElimination(inline_once=True))
if __name__ == "__main__":
tvm.testing.main()
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import transform, ExprVisitor, TypeVisitor
from tvm.relay.testing import Prelude
def has_func_type(t):
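# Return True if the type t contains a function type anywhere (used to detect higher-order values).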
class FuncTypeVisitor(TypeVisitor):
def __init__(self):
super().__init__()
self.has_func = False
def visit_func_type(self, ftt):
self.has_func = True
ftvisitor = FuncTypeVisitor()
ftvisitor.visit(t)
return ftvisitor.has_func
def assert_no_higher_order_functions(expr, mod):
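# Walk the expression (and any global functions it calls) and fail if any call produces or
# consumes a value of function type, i.e. if any higher-order call remains.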
class CheckFirstOrderVisitor(ExprVisitor):
def __init__(self, mod):
super().__init__()
self.mod = mod
self.hof = []
self.visited_gv = set()
def visit_call(self, call):
is_higher_order = False
if has_func_type(call.checked_type):
is_higher_order = True
for a in call.args:
if has_func_type(a.checked_type):
is_higher_order = True
if is_higher_order:
self.hof.append(call)
super().visit_call(call)
def visit_global_var(self, gv):
if gv not in self.visited_gv:
self.visited_gv.add(gv)
self.visit(self.mod[gv])
mod = transform.InferType()(mod)
check_fo_visitor = CheckFirstOrderVisitor(mod)
check_fo_visitor.visit(expr)
nl = "\n--------\n"
errmsg = f"""found {len(check_fo_visitor.hof)} higher order functions:
{nl.join(expr.astext() for expr in check_fo_visitor.hof)}"""
assert len(check_fo_visitor.hof) == 0, errmsg
def defunctionalized(mod):
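# Run the Defunctionalization pass on main and verify that no higher-order calls remain.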
mod = transform.InferType()(mod)
mod["main"] = transform.Defunctionalization(mod["main"], mod)
mod = transform.InferType()(mod)
assert_no_higher_order_functions(mod["main"], mod)
return mod
def to_list(mod, l):
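# Convert an interpreter ConstructorValue holding a Relay List ADT into a Python list of numpy arrays.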
list = mod.get_global_type_var("List")
list_adt = mod[list]
cons = list_adt.constructors[0]
nil = list_adt.constructors[1]
assert isinstance(l, ConstructorValue)
val = l
ret = []
while True:
if val.tag == cons.tag:
ret.append(val.fields[0].numpy())
val = val.fields[1]
else:
assert val.tag == nil.tag
break
return ret
def to_adt_list(mod, arr):
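# Build a Relay List ADT value (Cons/Nil) from a Python/numpy sequence by evaluating the constructor
# chain; note the list is constructed head-last, so it ends up in reverse order of arr.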
expr = mod["main"]
l = mod.get_global_type_var("List")
list_adt = mod[l]
cons = list_adt.constructors[0]
nil = list_adt.constructors[1]
li = nil()
for a in arr:
li = cons(relay.const(a), li)
adt = relay.create_executor(mod=mod).evaluate(li)
mod["main"] = expr
return adt
def test_simple():
code = """
def @simple[A, B](%f: fn(A) -> B, %xs: A) -> B {
%f(%xs)
}
def @main(%l: Tensor[(5, 5), float32]) -> Tensor[(5, 5), float32] {
%0 = fn[A](%x: A) -> A {
%x
};
@simple(%0, %l)
}
"""
mod = tvm.parser.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.rand(5, 5).astype("float32")
out = relay.create_executor("debug", mod=mod).evaluate()(input)
defunc_out = relay.create_executor("debug", mod=defunc_mod).evaluate()(input)
np.testing.assert_equal(out.numpy(), defunc_out.numpy())
def test_global_recursion():
code = """
type List[A] {
Cons(A, List[A]),
Nil,
}
def @id[A](%x: A) -> A {
%x
}
def @map[A, B](%f: fn(A) -> B, %xs: List[A]) -> List[B] {
match (%xs) {
Cons(%x, %rest) => Cons(%f(%x), @map(%f, %rest)),
Nil => Nil,
}
}
def @main(%l: List[float32]) -> List[float32] {
@map(@id, %l)
}
"""
mod = tvm.parser.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.rand(10).astype("float32")
out = relay.create_executor("debug", mod=mod).evaluate(mod["main"])(to_adt_list(mod, input))
defunc_out = relay.create_executor("debug", mod=defunc_mod).evaluate()(
to_adt_list(defunc_mod, input)
)
np.testing.assert_array_equal(to_list(mod, out), to_list(defunc_mod, defunc_out))
def test_recursive_datatype():
code = """
type List[A] {
Cons(A, List[A]),
Nil,
}
def @sum(%f: fn(int32) -> int32, %k: List[int32]) -> int32 {
match (%k) {
Cons(%x, %rest) => %0 = fn(%n) {
%x + %f(%n)
};
@sum(%0, %rest),
Nil => %f(0),
}
}
def @id[A](%x: A) -> A {
%x
}
def @main(%l: List[int32]) -> int32 {
@sum(@id, %l)
}
"""
mod = tvm.parser.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.randint(1, 100, 10)
out = relay.create_executor("debug", mod=mod).evaluate(mod["main"])(to_adt_list(mod, input))
defunc_out = relay.create_executor("debug", mod=defunc_mod).evaluate()(
to_adt_list(defunc_mod, input)
)
tvm.testing.assert_allclose(out.numpy(), defunc_out.numpy())
if __name__ == "__main__":
    pytest.main([__file__])
import numpy
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
def test_defuse_simple():
"""Simple testcase."""
def before():
x = relay.var("x", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
return relay.Function([x], w)
x = before()
x = run_opt_pass(x, transform.InferType())
fused = run_opt_pass(x, transform.FuseOps())
defused = run_opt_pass(fused, transform.DefuseOps())
assert tvm.ir.structural_equal(x, defused)
def test_inception_like():
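    """Check that fusing then defusing an inception-like block reproduces the original IR."""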
def conv(data):
y = relay.nn.conv2d(data, relay.var("w"), kernel_size=(3, 3), padding=(1, 1), channels=16)
return relay.nn.relu(data=y)
def inception_like(data):
c0 = conv(data)
c1 = conv(data)
return relay.concatenate((c0, c1), axis=1)
def before(dshape):
x = relay.var("x", shape=dshape)
in1 = inception_like(x)
in2 = inception_like(in1)
return relay.Function(relay.analysis.free_vars(in2), in2)
dshape = (1, 16, 64, 64)
x = before(dshape)
x = run_opt_pass(x, transform.InferType())
fused = run_opt_pass(x, transform.FuseOps())
defused = run_opt_pass(fused, transform.DefuseOps())
assert tvm.ir.structural_equal(x, defused)
def test_defuse_complex():
"""Complex defuse testcase"""
def fused_conv2d_batch_norm(w):
data = relay.var("data", shape=(1, 224, 224, 3))
bn_gamma0 = relay.var("bn_gamma0", relay.TensorType((64,), "float32"))
bn_beta0 = relay.var("bn_beta0", relay.TensorType((64,), "float32"))
bn_mmean0 = relay.var("bn_mean0", relay.TensorType((64,), "float32"))
bn_mvar0 = relay.var("bn_var0", relay.TensorType((64,), "float32"))
c0 = relay.nn.conv2d(
data,
w,
strides=(2, 2),
padding=(3, 3, 3, 3),
channels=64,
kernel_size=(7, 7),
data_layout="NHWC",
kernel_layout="OHWI",
out_layout="NHWC",
)
        c1 = relay.nn.batch_norm(c0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
c2 = c1[0]
return relay.Function(relay.analysis.free_vars(c2), c2)
def fused_conv2d_batch_norm_relu(z):
data2 = relay.var("data2", shape=(1, 56, 56, 64))
bn_gamma0 = relay.var("bn_gamma0", relay.TensorType((64,), "float32"))
bn_beta0 = relay.var("bn_beta0", relay.TensorType((64,), "float32"))
bn_mmean0 = relay.var("bn_mean0", relay.TensorType((64,), "float32"))
bn_mvar0 = relay.var("bn_var0", relay.TensorType((64,), "float32"))
c0 = relay.nn.conv2d(
data2,
z,
padding=(1, 1, 1, 1),
channels=64,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="OHWI",
out_layout="NHWC",
)
c1 = relay.nn.batch_norm(c0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
c2 = c1[0]
c3 = relay.nn.relu(data=c2)
return relay.Function(relay.analysis.free_vars(c3), c3)
def fused_max_pool2d():
data1 = relay.var("data1", shape=(1, 112, 112, 64))
a1 = relay.nn.max_pool2d(
data1,
pool_size=(3, 3),
strides=(2, 2),
padding=(1, 1, 1, 1),
layout="NHWC",
out_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(a1), a1)
def fused_add_relu():
data1 = relay.var("data1", shape=(1, 56, 56, 64))
data2 = relay.var("data2", shape=(1, 56, 56, 64))
a0 = relay.add(data1, data2)
a1 = relay.nn.relu(a0)
return relay.Function(relay.analysis.free_vars(a1), a1)
def before_fused(conv_layer1_weight, conv_layer2_weight):
data = relay.var("data", shape=(1, 3, 224, 224))
data1 = relay.layout_transform(data, src_layout="NCHW", dst_layout="NHWC")
bn_gamma0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
        bn_beta0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_mmean0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_mvar0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
a0 = fused_conv2d_batch_norm(conv_layer1_weight)
a1 = fused_max_pool2d()
a2 = fused_conv2d_batch_norm_relu(conv_layer2_weight)
a3 = fused_add_relu()
y0 = relay.Call(a0, [data1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0])
y1 = relay.Call(a1, [y0])
y2 = relay.Call(a2, [y1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0])
y3 = relay.Call(a3, [y1, y2])
return relay.Function(relay.analysis.free_vars(y3), y3)
def golden_defused(conv_layer1_weight, conv_layer2_weight):
data = relay.var("data", shape=(1, 3, 224, 224))
data1 = relay.layout_transform(data, src_layout="NCHW", dst_layout="NHWC")
bn_gamma0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_beta0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_mmean0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_mvar0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
c0 = relay.nn.conv2d(
data1,
conv_layer1_weight,
strides=(2, 2),
padding=(3, 3, 3, 3),
channels=64,
kernel_size=(7, 7),
data_layout="NHWC",
kernel_layout="OHWI",
out_layout="NHWC",
)
c1 = relay.nn.batch_norm(c0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
c2 = c1[0]
c3 = relay.nn.max_pool2d(
c2,
pool_size=(3, 3),
strides=(2, 2),
padding=(1, 1, 1, 1),
layout="NHWC",
out_layout="NHWC",
)
c4 = relay.nn.conv2d(
c3,
conv_layer2_weight,
padding=(1, 1, 1, 1),
channels=64,
kernel_size=(3, 3),
data_layout="NHWC", |
kernel_layout="OHWI",
out_layout="NHWC",
)
c5 = relay.nn.batch_norm(c4, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
c6 = c5[0]
c7 = relay.nn.relu(c6)
c8 = relay.add(c3, c7)
c9 = relay.nn.relu(c8)
return relay.Function(relay.analysis.free_vars(c9), c9)
conv_layer1_weight = relay.nn.Constant(
tvm.nd.array(numpy.ndarray(shape=(64, 7, 7, 3), dtype="float32"))
)
conv_layer2_weight = relay.nn.Constant(
tvm.nd.array(numpy.ndarray(shape=(64, 3, 3, 64), dtype="float32"))
)
x = before_fused(conv_layer1_weight, conv_layer2_weight)
x = run_opt_pass(x, transform.InferType())
defused = run_opt_pass(x, transform.DefuseOps())
golden1 = golden_defused(conv_layer1_weight, conv_layer2_weight)
golden1 = run_opt_pass(golden1, transform.InferType())
assert tvm.ir.structural_equal(defused, golden1), (
"Actual = \n" + str(defused) + "\nGolden = \n" + str(golden1)
)
if __name__ == "__main__":
    pytest.main([__file__])
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing import run_infer_type, create_workload
import tvm.topi.testing
import tvm.testing
def run_opt_pass(expr, opt_pass, params=None):
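    """Bind optional params into main, run a single pass, and return the transformed result."""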
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
if params is not None:
mod["main"] = bind_params_by_name(mod["main"], params)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def verify_func(func, data, ref_res, rtol=1e-5, atol=1e-7):
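    """Evaluate the function on all enabled targets and executors and compare against ref_res."""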
assert isinstance(data, list)
for target, dev in tvm.testing.enabled_targets():
for kind in ["graph", "vm", "debug"]:
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
*data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
@tvm.testing.uses_gpu
def test_dynamic_to_static_reshape():
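    """Check that reshape driven by shape_of is rewritten to a static reshape."""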
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(newshape, "float32"))
z = relay.reshape(x, relay.shape_of(y))
func = run_infer_type(relay.Function([x, y], z))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("reshape")
assert "newshape=" in zz.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
ref_res = np.reshape(x_data, oshape)
verify_func(func2, [x_data, y_data], ref_res)
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
@tvm.testing.uses_gpu
def test_dynamic_to_static_squeeze():
def verify_squeeze(shape, axis, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(axis, "float32"))
z = relay.squeeze(x, relay.shape_of(y))
func = run_infer_type(relay.Function([x, y], z))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("squeeze")
assert "axis=" in zz.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=axis).astype("float32")
ref_res = np.squeeze(x_data, axis)
verify_func(func2, [x_data, y_data], ref_res)
verify_squeeze((1, 3, 4, 1), (0,), (3, 4, 1))
verify_squeeze((1, 3, 4, 1), (3,), (1, 3, 4))
verify_squeeze((1, 3, 4, 1), (0, 3), (3, 4))
@tvm.testing.uses_gpu
def test_dynamic_to_static_double_reshape():
def verify_reshape(shape, newshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(newshape, "float32"))
z = relay.reshape(x, relay.shape_of(y))
z = relay.reshape(z, relay.shape_of(x))
func = run_infer_type(relay.Function([x, y], z))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("reshape")
assert "newshape=" in zz.astext()
assert zz.checked_type == relay.ty.TensorType(shape, "float32")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
verify_func(func2, [x_data, y_data], x_data)
verify_reshape((2, 3, 4), (8, 3))
verify_reshape((4, 7), (2, 7, 2))
@tvm.testing.uses_gpu
def test_dynamic_to_static_quad_reshape():
def verify_reshape(shape, newshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(newshape, "float32"))
z1 = relay.reshape(x, relay.shape_of(y))
z2 = relay.reshape(z1, relay.shape_of(x))
z3 = relay.reshape(z2, relay.shape_of(z1))
z4 = relay.reshape(z3, relay.shape_of(z2))
func = run_infer_type(relay.Function([x, y], z4))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("reshape")
assert "newshape=" in zz.astext()
assert zz.checked_type == relay.ty.TensorType(shape, "float32")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
verify_func(func2, [x_data, y_data], x_data)
verify_reshape((2, 3, 4), (8, 3))
verify_reshape((4, 7), (2, 7, 2))
@tvm.testing.uses_gpu
def test_dynamic_to_static_tile():
def verify_tile(shape, reps, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(reps, "float32"))
z = relay.tile(x, relay.shape_of(y))
func = run_infer_type(relay.Function([x, y], z))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("tile")
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=reps).astype("float32")
ref_res = np.tile(x_data, reps)
verify_func(func2, [x_data, y_data], ref_res)
verify_tile((2, 3, 4), (2, 1, 5), (4, 3, 20))
verify_tile((4, 7), (4, 2), (16, 14))
@tvm.testing.uses_gpu
def test_dynamic_to_static_topk():
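    """Check that topk with k bound as a parameter is rewritten to a static topk."""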
    def verify_topk(k, axis, ret_type, is_ascend, dtype):
shape = (20, 100)
x = relay.var("x", relay.TensorType(shape, "float32"))
k_var = relay.var("k", relay.TensorType((), "int32"))
out = relay.topk(x, k_var, axis, ret_type, is_ascend, dtype)
if isinstance(out, relay.expr.TupleWrapper):
out = out.astuple()
func = relay.Function([x, k_var], out)
params = {"k": k}
np_data = np.random.uniform(size=shape).astype("float32")
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
kk = k if k >= 1 else shape[axis]
if axis == 0:
np_indices = np_indices[:kk, :]
np_values = np.zeros(np_indices.shape).astype("float32")
for i in range(shape[1]):
np_values[:, i] = np_data[np_indices[:, i], i]
else:
np_indices = np_indices[:, :kk]
np_values = np.zeros(np_indices.shape).astype("float32")
for i in range(shape[0]):
np_values[i, :] = np_data[i, np_indices[i, :]]
np_indices = np_indices.astype(dtype)
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("topk")
for target, dev in tvm.testing.enabled_targets():
if "llvm" not in target:
continue
for kind in ["graph", "vm", "debug"]:
mod = tvm.ir.IRModule.from_expr(func2)
op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
np_data
)
if ret_type == "both":
tvm.testing.assert_allclose(op_res[0].numpy(), np_values)
tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)
elif ret_type == "values":
                    tvm.testing.assert_allclose(op_res.numpy(), np_values)
else:
tvm.testing.assert_allclose(op_res.numpy(), np_indices)
np.random.seed(0)
for k in [0, 1, 5]:
for axis in [0, -1, 1]:
for ret_type in ["both", "values", "indices"]:
verify_topk(k, axis, ret_type, True, "int64")
verify_topk(k, axis, ret_type, False, "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_broadcast_to():
def verify_broadcast_to(shape, broadcast_shape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(broadcast_shape, "float32"))
z = relay.broadcast_to(x, shape=relay.shape_of(y))
func = run_infer_type(relay.Function([x, y], z))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("broadcast_to")
assert zz.checked_type == relay.ty.TensorType(broadcast_shape, "float32")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=broadcast_shape).astype("float32")
ref_res = np.broadcast_to(x_data, y_data.shape)
verify_func(func2, [x_data, y_data], ref_res)
verify_broadcast_to((3, 1), (3, 3))
@tvm.testing.uses_gpu
def test_dynamic_to_static_zeros_ones():
def verify_ones_zeros(shape, dtype):
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
x = relay.var("x", relay.TensorType(shape, dtype))
y = op(relay.shape_of(x), dtype)
func = run_infer_type(relay.Function([x], y))
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic()),
transform.InferType(),
)
zz = func2.body
assert zz.checked_type == relay.ty.TensorType(shape, dtype)
            x_data = np.random.uniform(low=1, high=1, size=shape)
ref_res = ref(x_data.shape)
verify_func(func2, [x_data], ref_res)
verify_ones_zeros((1, 2, 3), "int64")
verify_ones_zeros((9, 8, 3, 4), "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_resize2d():
def verify_resize(shape, scale, method, layout):
if layout == "NHWC":
size = (shape[1] * scale, shape[2] * scale)
else:
size = (shape[2] * scale, shape[3] * scale)
x = relay.var("x", relay.TensorType(shape, "float32"))
size_var = relay.var("size", relay.TensorType((len(size),), "float32"))
coord_trans = "asymmetric" if method == "nearest_neighbor" else "align_corners"
z = relay.image.resize2d(
x, size_var, None, layout, method, coordinate_transformation_mode=coord_trans
)
params = {"size": np.array(size).astype("float32")}
func = run_infer_type(relay.Function([x, size_var], z))
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("image.resize2d")
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = tvm.topi.testing.resize2d_python(
x_data, (scale, scale), layout, method, coord_trans
        )
        verify_func(func2, [x_data], ref_res, rtol=1e-4, atol=1e-6)
for method in ["linear", "nearest_neighbor"]:
for layout in ["NCHW", "NHWC"]:
verify_resize((1, 4, 4, 4), 2, method, layout)
@tvm.testing.uses_gpu
def test_dynamic_to_static_one_hot():
def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
depth_var = relay.const(depth)
on_value_var = relay.var("on_value", relay.TensorType((), "int32"))
off_value_var = relay.var("off_value", relay.TensorType((), "int32"))
        out = relay.one_hot(indices, on_value_var, off_value_var, depth_var, axis, dtype)
params = {
"on_value": on_value,
"off_value": off_value,
}
func = relay.Function([indices, on_value_var, off_value_var], out)
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("one_hot")
indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
verify_func(func2, [indices_np], out_np)
_verify((3,), 3, 1, 0, -1, "int32")
_verify((3,), 3, 1.0, 0.0, -1, "float32")
_verify((2, 2), 5, 2, -2, 0, "int32")
_verify((2, 2), 5, 0.5, -0.5, 1, "float32")
_verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_full():
def verify_full(fill_value, fill_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
y = relay.var("y", relay.TensorType(fill_shape, "int64"))
z = relay.full(x, relay.shape_of(y), dtype)
func = run_infer_type(relay.Function([x, y], z))
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("full")
ref_res = np.full(fill_shape, fill_value).astype(dtype)
y_data = np.random.uniform(low=-1, high=1, size=fill_shape).astype("int64")
verify_func(func2, [fill_value, y_data], ref_res)
verify_full(4, (1, 2, 3, 4), "int32")
verify_full(4.0, (1, 2, 8, 10), "float32")
def test_dynamic_to_static_upsampling():
def verify_upsampling(data_shape, scale_h_val, scale_w_val, dtype):
x = relay.var("x", relay.TensorType(data_shape, dtype))
        scale_h = relay.var("scale_h", relay.TensorType((), "float32"))
scale_w = relay.var("scale_w", relay.TensorType((), "float32"))
z = relay.nn.upsampling(x, scale_h, scale_w)
params = {
"scale_h": scale_h_val,
"scale_w": scale_w_val,
}
func = run_infer_type(relay.Function([x, scale_h, scale_w], z))
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("nn.upsampling")
x_data = np.random.uniform(size=data_shape).astype(dtype)
ref_res = tvm.topi.testing.resize2d_python(
x_data, (scale_h_val, scale_w_val), "NCHW", "nearest_neighbor", "asymmetric"
)
verify_func(func2, [x_data], ref_res)
verify_upsampling((1, 16, 32, 32), 2, 2, "int8")
verify_upsampling((1, 16, 32, 32), 4, 4, "int32")
def test_dynamic_to_static_upsampling3d():
def verify_upsampling3d(data_shape, scale_d_val, scale_h_val, scale_w_val, dtype):
x = relay.var("x", relay.TensorType(data_shape, dtype))
scale_d = relay.var("scale_d", relay.TensorType((), "float32"))
scale_h = relay.var("scale_h", relay.TensorType((), "float32"))
scale_w = relay.var("scale_w", relay.TensorType((), "float32"))
z = relay.nn.upsampling3d(x, scale_d, scale_h, scale_w)
params = {
"scale_d": scale_d_val,
"scale_h": scale_h_val,
"scale_w": scale_w_val,
}
func = run_infer_type(relay.Function([x, scale_d, scale_h, scale_w], z))
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("nn.upsampling3d")
x_data = np.random.uniform(size=data_shape).astype(dtype)
ref_res = tvm.topi.testing.resize3d_python(
x_data,
            (scale_d_val, scale_h_val, scale_w_val),
"NCDHW",
"nearest_neighbor",
"asymmetric",
)
verify_func(func2, [x_data], ref_res)
verify_upsampling3d((1, 1, 1, 1, 1), 2, 3, 4, "int8")
verify_upsampling3d((5, 7, 8, 10, 32), 3, 2, 2, "int8")
verify_upsampling3d((1, 4, 2, 5, 3), 5, 4, 3, "int32")
def test_dynamic_to_static_pad():
def verify_pad(data_shape, pad_width_val, pad_val, dtype):
x = relay.var("x", relay.TensorType(data_shape, dtype))
pad_width = relay.var(
"pad_width", relay.TensorType((len(pad_width_val), len(pad_width_val[0])), "int32")
)
z = relay.nn.pad(x, pad_width, pad_val)
func = run_infer_type(relay.Function([x, pad_width], z))
params = {"pad_width": np.array(pad_width_val)}
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("nn.pad")
x_data = np.random.uniform(size=data_shape).astype(dtype)
ref_res = np.pad(
x_data, pad_width_val, "constant", constant_values=(((pad_val,) * 2),) * len(data_shape)
)
verify_func(func2, [x_data], ref_res)
verify_pad((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), 2.0, "int32")
verify_pad((2, 7), ((1, 4), (2, 2)), 4.0, "float64")
def test_dynamic_to_static_strided_slice():
def verify(
dshape,
begin_val,
end_val,
strides_val,
output,
slice_mode="end",
test_ref=True,
dtype="int32",
):
x = relay.var("x", relay.TensorType(dshape, "float32"))
ndim = len(dshape)
begin_val = begin_val if begin_val else [0] * ndim
end_val = end_val if end_val else list(dshape)
if strides_val:
if len(strides_val) == 1:
strides_val = strides_val * ndim
else:
            strides_val = [1] * ndim
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.strided_slice_python(
x_data, begin_val, end_val, strides_val, slice_mode
)
data = [x_data, np.array(begin_val), np.array(end_val)]
begin = relay.var("begin", relay.TensorType((len(begin_val),), dtype))
end = relay.var("end", relay.TensorType((len(end_val),), dtype))
func_params = [x, begin, end]
if strides_val:
data.append(np.array(strides_val))
strides = relay.var("strides", relay.TensorType((len(strides_val),), dtype))
z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)
func_params.append(strides)
else:
z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)
func = relay.Function(func_params, z)
params = {"begin": begin_val, "end": end_val, "strides": strides_val}
func = run_infer_type(func)
func2 = run_opt_pass(
run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
)
assert isinstance(func2.body, relay.Call)
assert func2.body.op == relay.op.get("strided_slice")
verify_func(func2, [x_data], ref_res)
verify((1, 3, 10, 10), [0, 0, 0, 0], [1, 3, 10, 10], [1], (0, 3, 10, 10), dtype="int64")
verify(
(1, 224, 224, 3),
[0, 20, 20, 0],
[1, 140, 140, 3],
[1, 1, 1, 1],
(1, 120, 120, 3),
dtype="int64",
)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype="int16")
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
    verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
verify(
(3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode="size", test_ref=False
)
verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode="size", test_ref=True)
@tvm.testing.uses_gpu
def test_dyn_to_static_sparse_to_dense():
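    """Check that sparse_to_dense with dynamic inputs is rewritten to the static op."""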
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
output_shape_data = np.array(output_shape)
a = relay.var(
"a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
)
b = relay.var(
"b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
)
output_shape_const = relay.const(output_shape_data)
if default_value is None:
args = [a, b]
d = relay.sparse_to_dense(a, output_shape_const, b)
else:
c = relay.var(
"c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
)
args = [a, b, c]
d = relay.sparse_to_dense(a, output_shape_const, b, c)
zz = run_infer_type(d)
assert len(zz.checked_type.shape) == len(output_shape)
func = relay.Function(args, d)
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
assert isinstance(func2.body, relay.Call)
assert func2.body.op == relay.op.get("sparse_to_dense")
if default_value is None:
arguments = [sparse_indices_data, sparse_values_data]
else:
arguments = [sparse_indices_data, sparse_values_data, default_value_data]
verify_func(func2, arguments, xpected)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])
    verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
)
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
)
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
)
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
@tvm.testing.uses_gpu
def test_dynamic_to_static_dynamic_rank():
def verify_full(fill_value, fill_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
y = relay.var("y", relay.TensorType(fill_shape, "int64"))
shape = relay.shape_of(y)
shape = relay.strided_slice(shape, [0], relay.shape_of(shape))
z = relay.full(x, shape, dtype)
func = relay.Function([x, y], z)
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("full")
ref_res = np.full(fill_shape, fill_value).astype(dtype)
y_data = np.random.uniform(low=-1, high=1, size=fill_shape).astype("int64")
verify_func(func2, [fill_value, y_data], ref_res)
verify_full(4, (1, 2, 3, 4), "int32")
verify_full(4.0, (1, 2, 8, 10), "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_dynamic_if():
x = relay.var("x", relay.TensorType((2, 2), "int64"))
cond = relay.const(1)
iff = relay.If(cond, relay.reshape(x, [1, 4]), relay.reshape(x, (4, 1)))
func = relay.Function([x], iff)
func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
zz = func2.body
assert isinstance(zz, relay.Call)
assert zz.op == relay.op.get("reshape")
x_data = np.random.uniform(low=-1, high=1, size=(2, 2)).astype("int64")
    verify_func(func2, [x_data], x_data.reshape(1, 4))
if __name__ == "__main__":
    pytest.main([__file__])
"""Test eliminate common subexpr pass"""
import tvm
from tvm import te
from tvm import relay
from tvm.relay.op import register_alter_op_layout
from tvm.relay import transform, analysis
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_simple():
def before():
x = relay.var("x", shape=(1, 16))
y1 = relay.nn.relu(x)
y2 = relay.nn.relu(x)
y1 = relay.add(y1, relay.const(1.0, "float32"))
y2 = relay.add(y2, relay.const(1.0, "float32"))
y = relay.add(y1, y2)
f = relay.Function([x], y)
return f
def expected():
x = relay.var("x", shape=(1, 16))
y = relay.nn.relu(x)
y = relay.add(y, relay.const(1.0, "float32"))
y = relay.add(y, y)
f = relay.Function([x], y)
return run_opt_pass(f, transform.InferType())
z = before()
z = run_opt_pass(z, transform.EliminateCommonSubexpr())
assert tvm.ir.structural_equal(z, expected())
def test_callback():
def before():
x = relay.var("x", shape=(1, 16))
y1 = relay.nn.relu(x)
y2 = relay.nn.relu(x)
y1 = relay.add(y1, relay.const(1.0, "float32"))
y2 = relay.add(y2, relay.const(1.0, "float32"))
y = relay.add(y1, y2)
f = relay.Function([x], y)
return f
def expected():
x = relay.var("x", shape=(1, 16))
y = relay.nn.relu(x)
y1 = relay.add(y, relay.const(1.0, "float32"))
y2 = relay.add(y, relay.const(1.0, "float32"))
y = relay.add(y1, y2)
f = relay.Function([x], y)
return run_opt_pass(f, transform.InferType())
def fskip(expr):
if isinstance(expr, relay.expr.Call) and expr.op.name == "add":
return True
return False
z = before()
z = run_opt_pass(z, transform.EliminateCommonSubexpr(fskip))
assert tvm.ir.structural_equal(z, expected())
def test_tuple_get_time():
def before():
x = relay.var("x", shape=(1, 16, 1, 1))
var = relay.var("v |
ar", shape=(16,))
mean = relay.var("mean", shape=(16,))
beta = relay.var("beta", shape=(16,))
gamma = relay.var("gamma", shape=(16,))
BN = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
T1 = BN[0]
T2 = BN[0]
add = T1 + T2
f = relay.Function([x, var, mean, beta, gamma], add)
return f
def expected():
x = relay.var("x", shape=(1, 16, 1, 1))
var = relay.var("var", shape=(16,))
mean = relay.var("mean", shape=(16,))
beta = relay.var("beta", shape=(16,))
gamma = relay.var("gamma", shape=(16,))
BN = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
T1 = BN[0]
add = T1 + T1
f = relay.Function([x, var, mean, beta, gamma], add)
return run_opt_pass(f, transform.InferType())
z = before()
z = run_opt_pass(z, transform.EliminateCommonSubexpr())
assert tvm.ir.structural_equal(z, expected())
if __name__ == "__main__":
test_simple()
    test_callback()
import os
import numpy as np
import tvm
from tvm import te
from tvm import relay
import tvm.relay.transform as _transform
def test_eta_expand_global_var():
mod = tvm.parser.fromtext(
r"""
def @aux(%x: Tensor[(), int32]) -> Tensor[(), int32] {
%x
}
def @main() -> fn(Tensor[(), int32]) -> Tensor[(), int32] {
@aux
}
"""
)
seq = tvm.transform.Sequential([_transform.EtaExpand(expand_global_var=True)])
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
expected = tvm.parser.fromtext(
r"""
def @aux(%x: Tensor[(), int32]) -> Tensor[(), int32] {
%x
}
def @main() -> fn(Tensor[(), int32]) -> Tensor[(), int32] {
fn (%x: Tensor[(), int32]) -> Tensor[(), int32] {
@aux(%x)
}
}
"""
)
tvm.ir.assert_structural_equal(mod["main"], expected["main"], map_free_vars=True)
def test_eta_expand_constructor():
mod = tvm.parser.fromtext(
r"""
type List[A] {
Cons(A, List[A]),
Nil,
}
def @main[A]() -> fn(A, List[A]) -> List[A] {
Cons
}
"""
)
seq = tvm.transform.Sequential(
[_transform.EtaExpand(expand_constructor=True), _transform.InferType()]
)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
expected = tvm.parser.fromtext(
r"""
type List[A] {
Cons(A, List[A]),
Nil,
}
def @main[A]() -> fn(A, List[A]) -> List[A] {
fn [A](%x: A, %xs: List[A]) -> List[A] {
Cons(%x, %xs)
}
}
"""
)
tvm.ir.assert_structural_equal(mod["main"], expected["main"], map_free_vars=True)
if __name__ == "__main__":
test_eta_expand_global_var()
    test_eta_expand_constructor()
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.transform import fake_quantization_to_integer
def compare_fq_to_int(expr, args, allow_rounding_error=False):
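    """Run the fake-quantized expression and its integer rewrite and check that results match."""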
mod = tvm.IRModule.from_expr(expr)
mod = tvm.relay.transform.InferType()(mod)
mod_int = tvm.relay.transform.FakeQuantizationToInteger()(mod)
assert not tvm.ir.structural_equal(mod, mod_int)
result = (
relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
result_int = (
relay.create_executor("vm", mod=mod_int, device=tvm.cpu(), target="llvm")
.evaluate()(*args)
.numpy()
)
if allow_rounding_error:
assert np.all(np.abs(result.astype("int32") - result_int.astype("int32")) <= 1)
else:
assert np.array_equal(result, result_int)
def test_fake_quantize_conv():
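    """Check that a dequantize->conv2d->quantize chain converts to integer ops with matching results."""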
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
one = relay.const(1.0)
zero = relay.const(0)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
kernel_size=[5, 5],
)
op = relay.qnn.op.quantize(op, one, zero, out_dtype=out_dtype)
x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
compare_fq_to_int(op, [x_np, w_np])
def test_fake_quantize_conv_per_channel():
for out_dtype in ["int8", "uint8"]:
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
one = relay.const([1.0] * 16)
zero_point = relay.const([np.random.randint(0, 255)] * 16)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0)),
relay.qnn.op.dequantize(
                w, relay.const(np.random.random([16]).astype("floa