a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
in_1 = relay.var("in_1", shape=(10, 10))
in_2 = relay.var("in_2", shape=(10, 10))
add_node = relay.add(in_1, in_2)
relu_node = relay.nn.relu(add_node)
add_relu = relay.Function([in_1, in_2], relu_node)
add_relu = add_relu.with_attr("Composite", "test.add_relu")
cb_1 = relay.annotation.compiler_begin(a, "test")
cb_2 = relay.annotation.compiler_begin(b, "test")
r = relay.Call(add_relu, [cb_1, cb_2])
ce_1 = relay.annotation.compiler_end(r, "test")
f = relay.Function([a, b], ce_1)
mod = tvm.IRModule.from_expr(f)
return mod
result = transform.AnnotateTarget("test")(before())
expected = transform.InferType()(after())
assert tvm.ir.structural_equal(expected, result)
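# Editorial note: the tests in this file share one recipe -- register a boolean
# predicate under the "target.<name>" op attribute, run
# transform.AnnotateTarget("<name>"), and compare the result against a module whose
# calls are fenced by relay.annotation.compiler_begin/compiler_end. A minimal,
# illustrative sketch (the "sketch" target name is hypothetical and not used by the
# real tests; it reuses this file's tvm/relay/transform imports):
def _annotate_target_sketch():
    @tvm.ir.register_op_attr("abs", "target.sketch")
    def _abs_supported(expr):
        return True  # claim every abs call for the hypothetical "sketch" target

    x = relay.var("x", shape=(4,))
    mod = tvm.IRModule.from_expr(relay.abs(x))
    # abs is now wrapped in compiler_begin("sketch") / compiler_end("sketch")
    return transform.AnnotateTarget("sketch")(mod)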
def test_double_target():
@tvm.ir.register_op_attr("nn.relu", "target.double.A")
def relu(expr):
return True
def before():
x = relay.var("x", shape=(10, 5))
a_1 = relay.nn.relu(x)
mod = tvm.IRModule.from_expr(a_1)
return mod
for annotate_non_call_ops in [True, False]:
mod = before()
mod1 = transform.AnnotateTarget("double.A", annotate_non_call_ops)(mod)
mod2 = transform.AnnotateTarget("double.A", annotate_non_call_ops)(mod1)
assert tvm.ir.structural_equal(mod1, mod2)
def test_different_targets():
@tvm.ir.register_op_attr("nn.relu", "target.different.A")
def relu(expr):
return True
@tvm.ir.register_op_attr("add", "target.different.B")
def add(expr):
return True
def before():
x = relay.var("x", shape=(10, 5))
a_1 = relay.nn.relu(x)
b_1 = relay.add(a_1, a_1)
mod = tvm.IRModule.from_expr(b_1)
return mod
for annotate_non_call_ops in [True, False]:
mod = before()
mod1 = transform.AnnotateTarget("different.A", annotate_non_call_ops)(mod)
mod1 = transform.AnnotateTarget("different.B", annotate_non_call_ops)(mod1)
mod2 = transform.AnnotateTarget(["different.A", "different.B"], annotate_non_call_ops)(mod)
assert tvm.ir.structural_equal(mod1, mod2)
def test_multiple_runs():
@tvm.ir.register_op_attr("nn.relu", "target.A")
def relu(expr):
return True
@tvm.ir.register_op_attr("add", "target.B")
def add(expr):
return True
def before():
x = relay.var("x", shape=(10, 5))
a_1 = relay.nn.relu(x)
a_2 = relay.abs(a_1)
a_3 = relay.nn.relu(a_1)
out = relay.add(a_2, a_3)
f = relay.Function([x], out)
mod = tvm.IRModule.from_expr(f)
return mod
for annotate_non_call_ops in [True, False]:
mod = transform.AnnotateTarget("A", annotate_non_call_ops)(before())
mod = transform.AnnotateTarget("B", annotate_non_call_ops)(mod)
expected = transform.AnnotateTarget(["A", "B"], annotate_non_call_ops)(before())
assert tvm.ir.structural_equal(expected, mod)
def test_ends_with_tuple():
trgt = "clip"
@tvm.ir.register_op_attr("clip", "target." + trgt)
def relu(expr):
return True
def get_model(get_item):
"""Return a model"""
a = relay.var("a", shape=(1, 16, 16, 4), dtype="uint8")
z = relay.op.clip(a, 0, 255)
b = relay.op.clip(z, 0, 15)
c = relay.op.clip(z, 16, 31)
t = relay.Tuple((c, b))
tgi = relay.TupleGetItem(t, 1) if get_item else t
foo = relay.Function([a], tgi)
return tvm.IRModule.from_expr(foo)
def get_expected(annotate_non_call_ops, get_item):
a_ = relay.var("a", shape=(1, 16, 16, 4), dtype="uint8")
a = relay.annotation.compiler_begin(a_, trgt)
z = relay.op.clip(a, 0, 255)
z1 = relay.annotation.compiler_end(z, trgt)
z1 = relay.annotation.compiler_begin(z1, trgt)
b = relay.op.clip(z1, 0, 15)
b = relay.annotation.compiler_end(b, trgt)
b = relay.annotation.compiler_begin(b, trgt) if annotate_non_call_ops else b
z2 = relay.annotation.compiler_end(z, trgt)
z2 = relay.annotation.compiler_begin(z2, trgt)
c = relay.op.clip(z2, 16, 31)
c = relay.annotation.compiler_end(c, trgt)
c = relay.annotation.compiler_begin(c, trgt) if annotate_non_call_ops else c
t = relay.Tuple((c, b))
t = relay.annotation.compiler_end(t, trgt) if annotate_non_call_ops else t
if get_item:
t = relay.annotation.compiler_begin(t, trgt) if annotate_non_call_ops else t
tgi = relay.TupleGetItem(t, 1)
tgi = relay.annotation.compiler_end(tgi, trgt) if annotate_non_call_ops else tgi
else:
tgi = t
foo = relay.Function([a_], tgi)
return tvm.IRModule.from_expr(foo)
for get_item in [True, False]:
for annotate_non_call_ops in [False, True]:
mod = get_model(get_item)
mod = transform.AnnotateTarget("clip", annotate_non_call_ops)(mod)
expected = transform.InferType()(get_expected(annotate_non_call_ops, get_item))
assert tvm.ir.structural_equal(expected, mod)
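# Editorial note: as get_expected() above illustrates, annotate_non_call_ops controls
# whether non-call nodes (tuples, TupleGetItem, ...) also receive
# compiler_begin/compiler_end annotations; with it disabled only call nodes are annotated.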
def test_if_else():
target = "test_if_else"
@tvm.ir.register_op_attr("equal", "target." + target)
def relu(expr):
return True
@tvm.ir.register_op_attr("tanh", "target." + target)
def tanh(expr):
return True
@tvm.ir.register_op_attr("sigmoid", "target." + target)
def sigmoid(expr):
return True
@tvm.ir.register_op_attr("erf", "target." + target)
def erf(expr):
return True
"""Test that If-else nodes compiles correctly when surrounded by supported nodes."""
def before():
data = relay.var("data", shape=(1, 32))
eq1 = relay.var("e1", shape=[], dtype="float32")
eq2 = relay.var("e2", shape=[], dtype="float32")
eq = relay.equal(eq1, eq2)
true_branch = relay.tanh(data)
false_branch = relay.sigmoid(data)
ife = relay.If(eq, true_branch, false_branch)
out = relay.erf(ife)
func = relay.Function([data, eq1, eq2], out)
mod = tvm.IRModule.from_expr(func)
return mod
def after():
data = relay.var("data", shape=(1, 32))
eq1 = relay.var("e1", shape=[], dtype="float32")
eq2 = relay.var("e2", shape=[], dtype="float32")
cb_1 = relay.annotation.compiler_begin(eq1, target)
cb_2 = relay.annotation.compiler_begin(eq2, target)
equality_condition = relay.equal(cb_1, cb_2)
ce_1 = relay.annotation.compiler_end(equality_condition, target)
cb_3 = relay.annotation.compiler_begin(data, target)
true_branch = relay.tanh(cb_3)
ce_2 = relay.annotation.compiler_end(true_branch, target)
cb_4 = relay.annotation.compiler_begin(data, target)
false_branch = relay.sigmoid(cb_4)
ce_3 = relay.annotation.compiler_end(false_branch, target)
if_condition = relay.If(ce_1, ce_2, ce_3)
cb_5 = relay.annotation.compiler_begin(if_condition, target)
erf_out = relay.erf(cb_5)
ce_4 = relay.annotation.compiler_end(erf_out, target)
func = relay.Function([data, eq1, eq2], ce_4)
mod = tvm.IRModule.from_expr(func)
return mod
expected = transform.InferType()(after())
for annotate_non_call_ops in [True, False]:
result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
assert tvm.ir.structural_equal(expected, result)
def test_while_let():
target = "test_while_let"
@tvm.ir.register_op_attr("less", "target." + target)
def less(expr):
return True
@tvm.ir.register_op_attr("add", "target." + target)
def add(expr):
return True
@tvm.ir.register_op_attr("zeros_like", "target." + target)
def zeros_like(expr):
return True
"""Test that let nodes compiles correctly when surrounded by other nodes."""
def before():
var1 = relay.var("var1", shape=(2,))
var2 = relay.var("var2", shape=(), dtype="int32")
var3 = relay.var("var3", shape=(2,))
cond = relay.less(var2, relay.const(10, dtype="int32"))
loop = relay.var("while_loop")
ii = var2 + relay.const(1, dtype="int32")
ss = var3 + var1
true_branch = loop(ii, ss)
ife = relay.If(cond, true_branch, var3)
func_1 = relay.Function([var2, var3], ife)
ret = relay.Let(loop, func_1, loop(relay.const(0, dtype="int32"), relay.zeros_like(var1)))
func_2 = relay.Function([var1], ret)
mod = tvm.IRModule.from_expr(func_2)
return mod
def after(annotate_non_call_ops):
var1 = relay.var("var1", shape=(2,))
var2 = relay.var("var2", shape=(), dtype="int32")
var3 = relay.var("var3", shape=(2,))
var4 = relay.const(10, dtype="int32")
cb_1 = relay.annotation.compiler_begin(var2, target)
cb_2 = relay.annotation.compiler_begin(var4, target)
less_condition = relay.less(cb_1, cb_2)
ce_1 = relay.annotation.compiler_end(less_condition, target)
loop = relay.var("while_loop")
cb_3 = relay.annotation.compiler_begin(var2, target)
cb_4 = relay.annotation.compiler_begin(relay.const(1, dtype="int32"), target)
add_op_1 = relay.add(cb_3, cb_4)
ce_2 = relay.annotation.compiler_end(add_op_1, target)
cb_5 = relay.annotation.compiler_begin(ce_2, "default") if annotate_non_call_ops else ce_2
cb_6 = relay.annotation.compiler_begin(var3, target)
cb_7 = relay.annotation.compiler_begin(var1, target)
add_op_2 = relay.add(cb_6, cb_7)
ce_3 = relay.annotation.compiler_end(add_op_2, target)
cb_8 = relay.annotation.compiler_begin(ce_3, "default") if annotate_non_call_ops else ce_3
true_branch = loop(cb_5, cb_8)
ce_4 = (
relay.annotation.compiler_end(true_branch, "default")
if annotate_non_call_ops
else true_branch
)
if_condition = relay.If(ce_1, ce_4, var3)
const_1 = relay.const(0, dtype="int32")
cb_9 = (
relay.annotation.compiler_begin(const_1, "default")
if annotate_non_call_ops
else const_1
)
cb_10 = relay.annotation.compiler_begin(var1, target)
zeros_like = relay.zeros_like(cb_10)
ce_5 = relay.annotation.compiler_end(zeros_like, target)
cb_11 = relay.annotation.compiler_begin(ce_5, "default") if annotate_non_call_ops else ce_5
while_condition = loop(cb_9, cb_11)
ce_6 = (
relay.annotation.compiler_end(while_condition, "default")
if annotate_non_call_ops
else while_condition
)
func_1 = relay.Function([var2, var3], if_condition)
ret = relay.Let(loop, func_1, ce_6)
func_2 = relay.Function([var1], ret)
mod = tvm.IRModule.from_expr(func_2)
return mod
for annotate_non_call_ops in [False, True]:
result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
expected = transform.InferType()(after(annotate_non_call_ops))
assert tvm.ir.structural_equal(expected, result)
def test_if_free_vars():
target = "test_if_free_vars"
@tvm.ir.register_op_attr("equal", "target." + target)
def equal(expr):
return True
@tvm.ir.register_op_attr("sigmoid", "target." + target)
def sigmoid(expr):
return True
@tvm.ir.register_op_attr("erf", "target." + target)
def erf(expr):
return True
"""Test that If-else nodes compiles correctly when surrounded by free variables"""
def before():
data = relay.var("data", shape=(1, 32))
eq1 = relay.var("e1", shape=[], dtype="float32")
eq2 = relay.var("e2", shape=[], dtype="float32")
eq = relay.equal(eq1, eq2)
true_branch = relay.zeros(shape=(1, 32), dtype="float32")
false_branch = relay.sigmoid(data)
ife = relay.If(eq, true_branch, false_branch)
out = relay.erf(ife)
func = relay.Function([data, eq1, eq2], out)
mod = tvm.IRModule.from_expr(func)
return mod
def after():
data = relay.var("data", shape=(1, 32))
eq1 = relay.var("e1", shape=[], dtype="float32")
eq2 = relay.var("e2", shape=[], dtype="float32")
cb_1 = relay.annotation.compiler_begin(eq1, target)
cb_2 = relay.annotation.compiler_begin(eq2, target)
equality_condition = relay.equal(cb_1, cb_2)
ce_1 = relay.annotation.compiler_end(equality_condition, target)
true_branch = relay.zeros(shape=(1, 32), dtype="float32")
cb_3 = relay.annotation.compiler_begin(data, target)
false_branch = relay.sigmoid(cb_3)
ce_2 = relay.annotation.compiler_end(false_branch, target)
if_condition = relay.If(ce_1, true_branch, ce_2)
cb_4 = relay.annotation.compiler_begin(if_condition, target)
erf_out = relay.erf(cb_4)
ce_3 = relay.annotation.compiler_end(erf_out, target)
func = relay.Function([data, eq1, eq2], ce_3)
mod = tvm.IRModule.from_expr(func)
return mod
for annotate_non_call_ops in [True, False]:
result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
expected = transform.InferType()(after())
assert tvm.ir.structural_equal(expected, result)
def test_free_vars_zeros():
target = "test_free_vars_zeros"
"""Test that free variables compile correctly on their own"""
def before():
func = relay.Function([], relay.zeros(shape=(0), dtype="float32"))
mod = tvm.IRModule.from_expr(func)
return mod
def after():
func = relay.Function([], relay.zeros(shape=(0), dtype="float32"))
mod = tvm.IRModule.from_expr(func)
return mod
result = transform.AnnotateTarget(target)(before())
expected = transform.InferType()(after())
assert tvm.ir.structural_equal(expected, result)
def test_empty_tuple():
target = "test_empty_tuple"
"""An empty tuple shoul |
d behave just like a call with no args (see above test)."""
def before():
func = relay.Function([], relay.Tuple([]))
mod = tvm.IRModule.from_expr(func)
return mod
def after():
func = relay.Function([], relay.Tuple([]))
mod = tvm.IRModule.from_expr(func)
return mod
for annotate_non_call_ops in [True, False]:
result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
expected = transform.InferType()(after())
assert tvm.ir.structural_equal(expected, result)
if __name__ == "__main__":
test_extern_dnnl()
test_composite_function()
test_multiple_ends()
test_type_propagation()
test_tuple()
test_multiple_runs()
test_if_else()
test_while_let()
test_if_free_vars()
test_free_vars_zeros()
test_different_targets()
test_double_target()
test_ends_with_tuple()
test_ref_create_read_write()
test_empty_tuple()
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import testing
from tvm.relay.expr import Call
from tvm.topi.utils import get_const_tuple
def quantize_and_build(out, skip_conv_layers=[]):
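"""Wrap `out` in a Function, build a workload, quantize it (optionally skipping the given conv layers), check that it still builds for llvm, and return the quantized module."""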
f = relay.Function(relay.analysis.free_vars(out), out)
mod, params = testing.create_workload(f)
with relay.quantize.qconfig(skip_conv_layers=skip_conv_layers):
qmod = relay.quantize.quantize(mod, params)
relay.build(qmod, "llvm", params=params)
return qmod
def test_mul_rewrite():
"""a test case where rhs of mul is not constant"""
data = relay.var("data", shape=(1, 16, 64, 64))
multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
conv = relay.nn.conv2d(
data, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=16
)
act = relay.nn.relu(data=conv)
quantize_and_build(act * multiplier)
pool = relay.nn.global_avg_pool2d(data=act)
quantize_and_build(act * pool)
def test_skip_conv():
data = relay.var("data", shape=(1, 16, 64, 64))
np_weight = np.random.rand(16, 16, 3, 3)
conv0_weight = relay.Constant(tvm.nd.array(np_weight)).astype("float32")
conv1_weight = relay.Constant(tvm.nd.array(np_weight)).astype("float32")
multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
conv0 = relay.nn.conv2d(data, conv0_weight, kernel_size=(3, 3), padding=(1, 1), channels=16)
act0 = relay.nn.relu(data=conv0)
conv1 = relay.nn.conv2d(act0, conv1_weight, kernel_size=(3, 3), padding=(1, 1), channels=16)
act1 = relay.nn.relu(data=conv1)
quantize_and_build(act1 * multiplier)
quantize_and_build(act1 * multiplier, skip_conv_layers=[0])
quantize_and_build(act1 * multiplier, skip_conv_layers=[1])
quantize_and_build(act1 * multiplier, skip_conv_layers=[0, 1])
def test_stop_quantize():
data = relay.var("data", shape=(1, 16, 64, 64))
np_weight0 = np.random.rand(16, 16, 3, 3)
conv0_weight = relay.Constant(tvm.nd.array(np_weight0)).astype("float32")
np_weight1 = np.random.rand(16, 16, 1, 1)
conv1_weight = relay.Constant(tvm.nd.array(np_weight1)).astype("float32")
multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
conv0 = relay.nn.conv2d(data, conv0_weight, kernel_size=(3, 3), padding=(1, 1), channels=16)
act0 = relay.nn.relu(data=conv0)
pool = relay.nn.global_avg_pool2d(data=act0)
conv1 = relay.nn.conv2d(pool, conv1_weight, kernel_size=(1, 1), padding=(0, 0), channels=16)
act1 = relay.nn.relu(data=conv1)
quantize_and_build(act1 * multiplier)
def test_batch_flatten_rewrite():
data = relay.var("data", shape=(1, 16, 64, 64), dtype="float32")
out = relay.nn.conv2d(
data, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=16
)
out = relay.nn.batch_flatten(out)
qmod = quantize_and_build(out)
def _check_batch_flatten(node):
if isinstance(node, Call):
if node.op.name == "nn.batch_flatten":
assert node.checked_type.dtype == "int8"
relay.analysis.post_order_visit(qmod["main"], _check_batch_flatten)
def test_batch_matmul_rewrite():
data = relay.var("data", shape=(1, 4, 16, 16))
data2 = relay.sigmoid(relay.var("data", shape=(4, 16, 64)))
out = relay.nn.conv2d(data, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=8)
out = relay.nn.batch_flatten(out)
out = relay.reshape(out, [1, 32, 64])
out = relay.nn.batch_matmul(out, data2)
qmod = quantize_and_build(out)
def _check_batch_matmul(node):
if isinstance(node, Call):
if node.op.name in ["nn.batch_matmul", "nn.conv2d"]:
assert node.checked_type.dtype == "int32"
elif node.op.name == "nn.batch_flatten":
assert node.checked_type.dtype == "int8"
relay.analysis.post_order_visit(qmod["main"], _check_batch_matmul)
def get_calibration_dataset(mod, input_name):
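"""Build a small random calibration dataset keyed by `input_name`, shaped to match the module's first input."""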
dataset = []
input_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
for i in range(5):
data = np.random.uniform(size=input_shape)
dataset.append({input_name: data})
return dataset
@pytest.mark.parametrize("create_target", [True, False])
def test_calibrate_target(create_target):
mod, params = testing.synthetic.get_workload()
dataset = get_calibration_dataset(mod, "data")
with relay.quantize.qconfig(calibrate_mode="kl_divergence"):
if create_target:
with tvm.target.Target("llvm"):
relay.quantize.quantize(mod, params, dataset)
else:
relay.quantize.quantize(mod, params, dataset)
def test_calibrate_memory_bound():
mod, params = testing.synthetic.get_workload()
dataset = get_calibration_dataset(mod, "data")
import multiprocessing
num_cpu = multiprocessing.cpu_count()
with relay.quantize.qconfig(calibrate_mode="kl_divergence", calibrate_chunk_by=num_cpu):
relay.quantize.quantize(mod, params, dataset)
def test_calibrate_percentile():
mod, params = testing.synthetic.get_workload()
dataset = get_calibration_dataset(mod, "data")
with relay.quantize.qconfig(calibrate_mode="percentile"):
relay.quantize.quantize(mod, params, dataset)
BASE_CFG = {
"skip_conv_layers": [],
"skip_dense_layers": False,
"dtype_input": "int8",
"dtype_weight": "int8",
"dtype_activation": "int32",
}
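# BASE_CFG is unpacked into relay.quantize.qconfig(**BASE_CFG, ...) by the partition
# tests below: quantize inputs and weights to int8, accumulate in int32, skip nothing.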
def gen_rand_tvm(tt, low, high):
if "int" in tt.dtype:
data_np = np.random.randint(low, high, size=get_const_tuple(tt.shape), dtype=tt.dtype)
elif "float" in tt.dtype:
data_np = np.random.uniform(low, high, size=get_const_tuple(tt.shape)).astype(tt.dtype)
else:
assert False, "unknown dtype"
return tvm.nd.array(data_np, device=tvm.cpu(0))
def verify_partition_fails(mod, params):
with relay.quantize.qconfig(**BASE_CFG, partition_conversions="enabled"):
partitioned_mod = relay.quantize.quantize(mod, params)
try:
with relay.quantize.qconfig(**BASE_CFG, partition_conversions="fully_integral"):
partitioned_mod = relay.quantize.quantize(mod, params)
raise RuntimeError("partitioning should have failed")
except AssertionError:
pass
def verify_partition(mod, params):
with relay.quantize.qconfig(**BASE_CFG, partition_conversions="disabled"):
unpartitioned_mod = relay.quantize.quantize(mod, params)
assert (
len(unpartitioned_mod.get_global_vars()) == 1
), "unpartitioned module should only have one function"
with relay.quantize.qconfig(**BASE_CFG, partition_conversions="fully_integral"):
partitioned_mod = relay.quantize.quantize(mod, params)
params = [gen_rand_tvm(param.type_annotation, 0, 1) for param in partitioned_mod["main"].params]
def _eval_mod(mod):
return relay.create_executor("vm", device=tvm.cpu(0), target="llvm", mod=mod).evaluate()(
*params
)
partitioned_mod_result = _eval_mod(partitioned_mod)
unpartitioned_mod_result = _eval_mod(unpartitioned_mod)
tvm.testing.assert_allclose(unpartitioned_mod_result.numpy(), partitioned_mod_result.numpy())
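# Editorial note: the partition tests below exercise three partition_conversions modes.
# "disabled" leaves the quantized model as a single function (verify_partition asserts
# exactly one global function), while "fully_integral" is expected to raise an
# AssertionError whenever part of the model cannot be kept in integer arithmetic,
# which is what verify_partition_fails checks for the unquantizable prefix/core/suffix
# models further down.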
def test_add_partition():
mod = tvm.parser.parse(
"""
def @main(
%x: Tensor[(10, 10), float32],
%y: Tensor[(10, 10), float32]) {
add(%x, %y)
}
"""
)
params = {}
verify_partition_fails(mod, params)
def test_conv2d_partition():
mod = tvm.parser.parse(
"""
def @main(
%x: Tensor[(1, 4, 16, 16), float32],
%w: Tensor[(4, 4, 3, 3), float32]) -> Tensor[(1, 4, 16, 16), float32] {
nn.conv2d(%x, %w,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3])
}
"""
)
weight_ty = mod["main"].params[1].checked_type
params = {"w": gen_rand_tvm(weight_ty, 0, 1)}
verify_partition(mod, params)
def test_multiple_arg_conversions_partition():
mod = tvm.parser.parse(
"""
def @main(
%x1: Tensor[(1, 4, 16, 16), float32],
%w1: Tensor[(4, 4, 3, 3), float32],
%x2: Tensor[(1, 4, 16, 16), float32],
%w2: Tensor[(4, 4, 3, 3), float32]
) -> Tensor[(1, 4, 16, 16), float32] {
%0 = nn.conv2d(%x1, %w1,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3]);
%1 = nn.conv2d(%x2, %w2,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3]);
add(%0, %1)
}
"""
)
w1_ty = mod["main"].params[1].checked_type
w2_ty = mod["main"].params[3].checked_type
params = {"w1": gen_rand_tvm(w1_ty, 0, 1), "w2": gen_rand_tvm(w2_ty, 0, 1)}
verify_partition(mod, params)
def test_unquantizable_prefix_partition():
mod = tvm.parser.parse(
"""
def @main(
%x: Tensor[(1, 4, 16, 16), float32],
%b: Tensor[(4), float32],
%w: Tensor[(4, 4, 3, 3), float32]) -> Tensor[(1, 4, 16, 16), float32] {
%0 = nn.bias_add(%x, %b);
nn.conv2d(%0, %w,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3])
}
"""
)
bias_ty = mod["main"].params[1].checked_type
weight_ty = mod["main"].params[2].checked_type
params = {"b": gen_rand_tvm(bias_ty, 0, 1), "w": gen_rand_tvm(weight_ty, 0, 1)}
verify_partition_fails(mod, params)
def test_unquantizable_core_partition():
mod = tvm.parser.parse(
"""
def @main(
%x1: Tensor[(1, 4, 16, 16), float32],
%w1: Tensor[(4, 4, 3, 3), float32],
%b: Tensor[(4), float32],
%w2: Tensor[(4, 4, 3, 3), float32]) -> Tensor[(1, 4, 16, 16), float32] {
%0 = nn.conv2d(%x1, %w1,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3]);
%1 = nn.bias_add(%0, %b);
nn.conv2d(%1, %w2,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3])
}
"""
)
w1_ty = mod["main"].params[1].checked_type
bias_ty = mod["main"].params[2].checked_type
w2_ty = mod["main"].params[3].checked_type
params = {
"w1": gen_rand_tvm(w1_ty, 0, 1),
"w2": gen_rand_tvm(w2_ty, 0, 1),
"b": gen_rand_tvm(bias_ty, 0, 1),
}
verify_partition_fails(mod, params)
def test_unquantizable_suffix_partition():
mod = tvm.parser.parse(
"""
def @main(
%x: Tensor[(1, 4, 16, 16), float32],
%w: Tensor[(4, 4, 3, 3), float32],
%b: Tensor[(4), float32]) -> Tensor[(1, 4, 16, 16), float32] {
%0 = nn.conv2d(%x, %w,
padding=[1, 1, 1, 1],
channels=4,
kernel_size=[3, 3]);
nn.bias_add(%0, %b)
}
"""
)
weight_ty = mod["main"].params[1].checked_type
bias_ty = mod["main"].params[2].checked_type
params = {"w": gen_rand_tvm(weight_ty, 0, 1), "b": gen_rand_tvm(bias_ty, 0, 1)}
verify_partition_fails(mod, params)
def test_left_shift_negative():
data = relay.var("data", shape=(1, 16, 64, 64))
weight = relay.const(np.full((16, 16, 3, 3), 256.0))
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), channels=16)
relu = relay.nn.relu(conv2d)
mod = tvm.IRModule.from_expr(relu)
with tvm.transform.PassContext(opt_level=3):
with relay.quantize.qconfig(
calibrate_mode="global_scale", global_scale=8.0, skip_conv_layers=None
):
qnn_mod = relay.quantize.quantize(mod)
class OpFinder(relay.ExprVisitor):
def __init__(self, op_name):
super(OpFinder, self).__init__()
self._op_name = op_name
self.ops = list()
def visit_call(self, call):
super().visit_call(call)
if call.op.name == self._op_name:
self.ops.append(call)
opf = OpFinder("left_shift")
opf.visit(qnn_mod["main"])
assert len(opf.ops) > 0, 'Broken case, can\'t find any "left_shift" operators.'
for left_shift_op in opf.ops:
shift_amount = left_shift_op.args[1].data.numpy()
assert shift_amount >= 0, "Shift amount must be non-negative."
def test_dense_conv2d_rewrite():
n, c, h, w = 1, 16, 64, 64
data = relay.var("data", relay.TensorType((n, c, h, w)))
inp = relay.var("inp", relay.TensorType((n, c * h * w)))
weight_T = relay.const(np.random.random((n, c * h * w)), dtype="float32")
bias = relay.const(np.random.random((n,)), dtype="float32")
conv_w = relay.const(np.random.random((16, 16, 3, 3)), dtype="float32")
dense_o = relay.nn.dense(inp, weight_T)
linear_o = relay.nn.bias_add(dense_o, bias)
conv2d_o = relay.nn.conv2d(data, conv_w, kernel_size=(3, 3), padding=(1, 1), channels=16)
result = relay.Tuple((linear_o, conv2d_o))
mod = tvm.IRModule.from_expr(result)
with tvm.transform.PassContext(opt_level=3):
with relay.quantize.qconfig(
calibrate_mode="global_scale", global_scale=8.0, skip_dense_layer=False
):
qnn_mod = relay.quantize.quantize(mod)
def _check_dense(node):
if isinstance(node, Call):
if node.op.name == "nn.dense":
assert node.args[0].checked_type.dtype == "int8"
assert node.args[1].checked_type.dtype == "int8"
assert node.checked_type.dtype == "int32"
if node.op.name == "nn.conv2d":
assert node.args[0].checked_type.dtype == "float32"
assert node.args[1].checked_type.dtype == "float32"
assert node.checked_type.dtype == "float32"
relay.analysis.post_order_visit(qnn_mod["main"], _check_dense)
if __name__ == "__main__":
test_mul_rewrite()
test_batch_flatten_rewrite()
test_batch_matmul_rewrite()
test_calibrate_target(False)
test_calibrate_target(True)
test_calibrate_memory_bound()
test_calibrate_percentile()
test_add_partition()
test_conv2d_partition()
test_multiple_arg_conversions_partition()
test_unquantizable_prefix_partition()
test_unquantizable_core_partition()
test_unquantizable_suffix_partition()
test_left_shift_negative()
test_dense_conv2d_rewrite()
test_skip_conv()
test_stop_quantize()
import tvm
from tvm import te
import tvm.relay as relay
import tvm.relay.transform as _transform
def test_canonicalize_cast():
def before(data, conv_weight, bias1, bias2):
x = relay.nn.conv2d(
data, conv_weight, channels=16, kernel_size=(3, 3), padding=(1, 1), out_dtype="int8"
)
x1 = relay.cast(x, dtype="int32")
y1 = relay.add(x1, bias1)
y2 = relay.add(x1, bias2)
y = relay.add(y1, y2)
return relay.Function([data, conv_weight, bias1, bias2], y)
def expected(data, conv_weight, bias1, bias2):
x = relay.nn.conv2d(
data, conv_weight, channels=16, kernel_size=(3, 3), padding=(1, 1), out_dtype="int8"
)
x1 = relay.cast(x, dtype="int32")
x2 = relay.cast(x, dtype="int32")
y1 = relay.add(x1, bias1)
y2 = relay.add(x2, bias2)
y = relay.add(y1, y2)
return relay.Function([data, conv_weight, bias1, bias2], y)
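# CanonicalizeCast is expected to duplicate the int32 cast so that each add consumes
# its own cast of the int8 conv2d output (x1/x2 in expected) instead of sharing one.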
def check(shape):
data = relay.var("data", shape=shape, dtype="int8")
conv_weight = relay.var("weight")
bias1 = relay.var("bias1", shape=(16, 1, 1), dtype="int32")
bias2 = relay.var("bias2", shape=(16, 1, 1), dtype="int32")
y = before(data, conv_weight, bias1, bias2)
mod = tvm.IRModule.from_expr(y)
seq = tvm.transform.Sequential(
[_transform.InferType(), _transform.CanonicalizeCast(), _transform.InferType()]
)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
y = mod["main"]
y_expected = expected(data, conv_weight, bias1, bias2)
gv = relay.GlobalVar("expected")
mod[gv] = y_expected
mod = _transform.InferType()(mod)
y_expected = mod["expected"]
assert tvm.ir.structural_equal(y, y_expected)
check((1, 16, 7, 7))
if __name__ == "__main__":
test_canonicalize_cast()
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import check_kind
import pytest
def test_typevar_kind():
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.ShapeVar)
tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
assert check_kind(tp1) == relay.TypeKind.Type
assert check_kind(tp2) == relay.TypeKind.ShapeVar
assert check_kind(tp3) == relay.TypeKind.Constraint
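# check_kind returns the kind of a well-formed type; the xfail-marked tests further
# down rely on it raising tvm.error.TVMError for ill-kinded types.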
def test_tuple_kind():
tp = relay.TypeVar("tp", relay.TypeKind.Type)
tt = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
tf = relay.FuncType(
tvm.runtime.convert([]), tt, tvm.runtime.convert([]), tvm.runtime.convert([])
)
fields = tvm.runtime.convert([tp, tf, tt])
tup_ty = relay.TupleType(fields)
assert check_kind(tup_ty) == relay.TypeKind.Type
def test_func_kind():
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
shape = tvm.runtime.convert([1, 2, 3])
dtype = "float32"
tensor_type = relay.TensorType(shape, dtype)
tr = relay.TypeRelation(None, tvm.runtime.convert([tensor_type, tp1]), 1, None)
type_params = tvm.runtime.convert([tp1, tp2])
type_constraints = tvm.runtime.convert([tr])
arg_types = tvm.runtime.convert([tp1, tensor_type])
ret_type = relay.TupleType(tvm.runtime.convert([tp2, tensor_type]))
tf = relay.FuncType(arg_types, ret_type, type_params, type_constraints)
assert check_kind(tf) == relay.TypeKind.Type
def test_ref_kind():
tt = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
ft = relay.FuncType(
tvm.runtime.convert([]), tt, tvm.runtime.convert([]), tvm.runtime.convert([])
)
rt1 = relay.RefType(tt)
assert check_kind(rt1) == relay.TypeKind.Type
rt2 = relay.RefType(ft)
assert check_kind(rt2) == relay.TypeKind.Type
rt3 = relay.RefType(relay.TupleType([rt1, rt2]))
assert check_kind(rt3) == relay.TypeKind.Type
def test_relation_kind():
tp = relay.TypeVar("tp", relay.TypeKind.Type)
tt = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
tf = relay.FuncType(
tvm.runtime.convert([]), tt, tvm.runtime.convert([]), tvm.runtime.convert([])
)
args = tvm.runtime.convert([tf, tt, tp])
tr = relay.TypeRelation(None, args, 2, None)
assert check_kind(tr) == relay.TypeKind.Constraint
def test_global_typevar_kind():
v1 = relay.GlobalTypeVar("gtv1", relay.TypeKind.AdtHandle)
v2 = relay.GlobalTypeVar("gtv2", relay.TypeKind.Type)
assert check_kind(v1) == relay.TypeKind.AdtHandle
assert check_kind(v2) == relay.TypeKind.Type
def test_typecall_kind():
gtv = relay.GlobalTypeVar("gtv")
mod = tvm.IRModule()
data = relay.TypeData(gtv, [], [])
mod[gtv] = data
empty_call = relay.TypeCall(gtv, [])
assert check_kind(empty_call, mod) == relay.TypeKind.Type
new_mod = tvm.IRModule()
tv = relay.TypeVar("tv")
new_data = relay.TypeData(gtv, [tv], [])
new_mod[gtv] = new_data
call = relay.TypeCall(gtv, [relay.TupleType([])])
assert check_kind(call, new_mod) == relay.TypeKind.Type
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_tuple_kind():
tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
tp2 = relay.TypeVar("tp2", relay.TypeKind.BaseType)
tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
fields = tvm.runtime.convert([tp1, tp2, tp3])
tup_ty = relay.TupleType(fields)
check_kind(tup_ty)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_func_kind():
tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
tp2 = relay.TypeVar("tp2", relay.TypeKind.BaseType)
tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
type_params = tvm.runtime.convert([tp1, tp2, tp3])
type_constraints = tvm.runtime.convert([])
arg_types = tvm.runtime.convert([tp1, tp2])
ret_type = tp3
tf = relay.FuncType(arg_types, ret_type, type_params, type_constraints)
check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_ref_kind():
tp = relay.TypeVar("tp", r |
elay.TypeKind.ShapeVar)
rt = relay.RefType(tp)
check_kind(rt)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_relation_kind():
tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
tp2 = relay.TypeVar("tp2", relay.TypeKind.BaseType)
tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
args = tvm.runtime.convert([tp1, tp2, tp3])
func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
tr = relay.TypeRelation(func, args, 2, None)
check_kind(tr)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_typecall_invalid_callee():
gtv = relay.GlobalTypeVar("v1", relay.TypeKind.Type)
check_kind(relay.TypeCall(gtv, []))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_typecall_invalid_args():
mod = tvm.IRModule()
gtv = relay.GlobalTypeVar("v1")
data = relay.TypeData(gtv, [], [])
mod[gtv] = data
check_kind(relay.TypeCall(gtv, [data]))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_typecall_invalid_num_args():
mod = tvm.IRModule()
gtv = relay.GlobalTypeVar("v1")
tv = relay.TypeVar("tv")
data = relay.TypeData(gtv, [tv], [])
mod[gtv] = data
check_kind(relay.TypeCall(gtv, []))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_ret_type():
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.ShapeVar)
tf = relay.FuncType(
tvm.runtime.convert([tp1]), tp2, tvm.runtime.convert([tp1, tp2]), tvm.runtime.convert([])
)
check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_arg_types():
tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
tf = relay.FuncType(
tvm.runtime.convert([tp1]), tp2, tvm.runtime.convert([tp1, tp2]), tvm.runtime.convert([])
)
check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_tuple():
tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar |
)
ret_type = relay.TupleType(tvm.runtime.convert([tp1, tp1, tp1]))
tf = relay.FuncType(
tvm.runtime.convert([]), ret_type, tvm.runtime.convert([tp1]), tvm.runtime.convert([])
)
check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_relation():
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.ShapeVar)
tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
tr = relay.TypeRelation(func, tvm.runtime.convert([tp2, tp3]), 1, None)
tf = relay.FuncType(
tvm.runtime.convert([tp1]),
tp1,
tvm.runtime.convert([tp1, tp2, tp3]),
tvm.runtime.convert([tr]),
)
check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_tuple_with_invalid_func():
tensor_type = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
tf = relay.FuncType(
tvm.runtime.convert([]), tp1, tvm.runtime.convert([tp1]), tvm.runtime.convert([])
)
tup_ty = relay.TupleType(tvm.runtime.convert([tensor_type, tf]))
check_kind(tup_ty)
if __name__ == "__main__":
test_tuple_kind()
test_func_kind()
test_ref_kind()
test_relation_kind()
test_global_typevar_kind()
test_typecall_kind()
test_invalid_tuple_kind()
test_invalid_func_kind()
test_invalid_ref_kind()
test_invalid_relation_kind()
test_typecall_invalid_callee()
test_typecall_invalid_args()
test_typecall_invalid_num_args()
test_func_with_invalid_ret_type()
test_func_with_invalid_arg_types()
test_func_with_invalid_tuple()
test_func_with_invalid_relation()
test_tuple_with_invalid_func()
import tvm
import tvm.testing
import pytest
from tvm.relay.transform import CollagePartition, InferType, CapturePostDfsIndexInSpans
from tvm.target import make_compilation_config
from tvm.relay.collage import MockCostEstimator
from unittest.mock import patch
from tvm.relay.dataflow_pattern import is_op, wildcard
def test_pattern_table():
def relu_pattern():
return is_op("nn.relu")(wildcard())
def add_pattern():
return is_op("add")(wildcard(), wildcard())
def concatenate_pattern():
return is_op("concatenate")(wildcard())
def predicate(expr):
return True
return [
("relu", relu_pattern(), predicate),
("add", add_pattern(), predicate),
("concatenate", concatenate_pattern(), predicate),
]
def _mock_get_pattern_table(target):
if target == "example_target_hook":
return test_pattern_table()
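# The @patch decorators on the tests below substitute _mock_get_pattern_table for
# tvm.relay.op.contrib.get_pattern_table, so the "example_target_hook" BYOC target
# advertises the relu/add/concatenate composite patterns defined above.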
def run_collage(
input_mod, targets, cost_estimator, expected_mod, tvm_max_depth=8, byoc_max_depth=8
):
ctxt = {
"relay.collage.tvm_max_depth": tvm_max_depth,
"relay.collage.byoc_max_depth": byoc_max_depth,
}
expected_mod = InferType()(expected_mod)
pass_ctxt = tvm.transform.PassContext(config=ctxt)
with pass_ctxt:
config = make_compilation_config(pass_ctxt, targets)
actual_mod = InferType()(input_mod)
actual_mod = CapturePostDfsIndexInSpans()(actual_mod)
actual_mod = CollagePartition(config, cost_estimator)(actual_mod)
if not tvm.ir.structural_equal(actual_mod, expected_mod, map_free_vars=True):
print("Input module:")
print(input_mod)
print("Actual module:")
print(actual_mod)
print("Expected module:")
print(expected_mod)
tvm.ir.assert_structural_equal(actual_mod, expected_mod, map_free_vars=True)
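# Editorial note: the relay.collage.tvm_max_depth / byoc_max_depth PassContext
# options appear to cap how many dataflow nodes a single TVM-native or BYOC
# candidate partition may cover; the depth-parameterized tests below compare the
# partitions produced at different caps against hand-written expected modules.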
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_single_op_llvm(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
nn.relu(%x)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
nn.relu(%x)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 1,
"example_target_hook": 2,
}
)
run_collage(mod, targets, cost_estimator, expected_mod)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_single_op_byoc(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
nn.relu(%x)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @collage_example_target_hook_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%0(%FunctionVar_0)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
@collage_example_target_hook_nn_relu(%x)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 2,
"example_target_hook": 1,
}
)
run_collage(mod, targets, cost_estimator, expected_mod)
@pytest.mark.parametrize("byoc_max_depth", [1, 3])
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_diamond_valid_topology(mock_get_pattern_table, byoc_max_depth):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = abs(%0);
%2 = nn.relu(%1);
add(%1, %2)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_3_txt = """
def @collage_example_target_hook_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%0(%FunctionVar_0)
}
def @collage_example_target_hook_nn_relu_add(%FunctionVar_02: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_add") -> Tensor[(10, 10), float32] {
%1 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%2 = %1(%FunctionVar_02);
%3 = fn (%FunctionVar_03: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_03, %FunctionVar_1)
};
%3(%FunctionVar_02, %2)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%4 = @collage_example_target_hook_nn_relu(%x);
%5 = abs(%4);
@collage_example_target_hook_nn_relu_add(%5)
}
"""
expected_1_txt = """
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_01: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_01, %FunctionVar_1)
};
%2(%FunctionVar_0, %1)
}
def @collage_example_target_hook_nn_relu(%FunctionVar_03: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%3 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%3(%FunctionVar_03)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%4 = @collage_example_target_hook_nn_relu(%x);
%5 = abs(%4);
@collage_example_target_hook(%5)
}
"""
expected_mod = tvm.parser.fromtext(expected_1_txt if byoc_max_depth == 1 else expected_3_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 2,
"example_target_hook": 1,
}
)
run_collage(
mod, targets, cost_estimator, expected_mod, tvm_max_depth=1, byoc_max_depth=byoc_max_depth
)
@pytest.mark.parametrize("tvm_max_depth", [1, 2, 3])
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_tvm_max_depth(mock_get_pattern_table, tvm_max_depth):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txts = {
1: """
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_03: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
%4 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%4(%3)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
@collage_example_target_hook(%x)
}
""",
2: """
def @collage_example_target_hook_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%0(%FunctionVar_0)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%1 = @collage_example_target_hook_nn_relu(%x);
%2 = nn.relu(%1);
nn.relu(%2)
}
""",
3: """
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
""",
}
expected_mod = tvm.parser.fromtext(expected_txts[tvm_max_depth])
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 100,
"example_target_hook": 99,
}
)
run_collage(
mod, targets, cost_estimator, expected_mod, tvm_max_depth=tvm_max_depth, byoc_max_depth=1
)
@pytest.mark.parametrize("byoc_max_depth", [1, 2, 3])
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_byoc_max_depth(mock_get_pattern_table, byoc_max_depth):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txts = {
1: """
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
""",
2: """
def @collage_example_target_hook_nn_relu_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%2(%1)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%3 = nn.relu(%x);
@collage_example_target_hook_nn_relu_nn_relu(%3)
}
""",
3: """
def @collage_example_target_hook_nn_relu_nn_relu_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_nn_relu_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_03: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
%4 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%4(%3)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
@collage_example_target_hook_nn_relu_nn_relu_nn_relu(%x)
}
""",
}
expected_mod = tvm.parser.fromtext(expected_txts[byoc_max_depth])
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 99,
"example_target_hook": 100,
}
)
run_collage(
mod, targets, cost_estimator, expected_mod, tvm_max_depth=1, byoc_max_depth=byoc_max_depth
)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_output_tuple(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = abs(%1);
(%0, %1, %2)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> (Tensor[(10, 10), float32], Tensor[(10, 10), float32]) {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
(%1, %3)
}
def @main(%x: Tensor[(10, 10), float32]) -> (Tensor[(10, 10), float32], Tensor[(10, 10), float32], Tensor[(10, 10), float32]) {
%4 = @collage_example_target_hook(%x);
%5 = %4.1;
%6 = %4.0;
%7 = abs(%5);
(%6, %5, %7)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_ |
hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 2,
"example_target_hook": 1,
}
)
run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=2, byoc_max_depth=2)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_intermediate_tuple(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = (%0, %1);
concatenate(%2)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> (Tensor[(10, 10), float32], Tensor[(10, 10), float32]) {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
(%1, %3)
}
def @collage_example_target_hook_concatenate(%FunctionVar_03: (Tensor[(10, 10), float32], Tensor[(10, 10), float32]), Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_concatenate") -> Tensor[(20, 10), float32] {
%4 = fn (%FunctionVar_04: (Tensor[(10, 10), float32], Tensor[(10, 10), float32]), Composite="concatenate") -> Tensor[(20, 10), float32] {
concatenate(%FunctionVar_04)
};
%4(%FunctionVar_03)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(20, 10), float32] {
%5 = @collage_example_target_hook(%x);
%6 = %5.0;
%7 = %5.1;
%8 = (%6, %7);
@collage_example_target_hook_concatenate(%8)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 2,
"example_target_hook": 1,
}
)
run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=3, byoc_max_depth=5)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_fusion_benefit(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = abs(%x);
%3 = nn.relu(%2);
%4 = add(%1, %3);
%5 = nn.relu(%4);
abs(%5)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @collage_example_target_hook_nn_relu_nn_relu_nn_relu_add_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_nn_relu_nn_relu_add_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_03: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%3 = fn (%FunctionVar_05: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_05)
};
%4 = %2(%1);
%5 = %3(%FunctionVar_1);
%6 = fn (%FunctionVar_02: Tensor[(10, 10), float32], %FunctionVar_11: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_02, %FunctionVar_11)
};
%7 = %6(%4, %5);
%8 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%8(%7)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%9 = abs(%x);
%10 = @collage_example_target_hook_nn_relu_nn_relu_nn_relu_add_nn_relu(%x, %9);
abs(%10)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 5,
"example_target_hook": 6,
}
)
run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=1, byoc_max_depth=5)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_double_residual(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = abs(%0);
%2 = add(%0, %1);
add(%1, %2)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @collage_example_target_hook_add_add(%FunctionVar_0: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_add_add") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_02: Tensor[(10, 10), float32], %FunctionVar_12: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_02, %FunctionVar_12)
};
%1 = %0(%FunctionVar_1, %FunctionVar_0);
%2 = fn (%FunctionVar_01: Tensor[(10, 10), float32], %FunctionVar_11: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_01, %FunctionVar_11)
};
%2(%FunctionVar_0, %1)
}
def @collage_example_target_hook_nn_relu(%FunctionVar_03: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%3 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%3(%FunctionVar_03)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%4 = @collage_example_target_hook_nn_relu(%x);
%5 = abs(%4);
@collage_example_target_hook_add_add(%5, %4)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 2,
"example_target_hook": 1,
}
)
run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=4, byoc_max_depth=4)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_pruning_heuristic(mock_get_pattern_table):
mod_txt = """
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = add(%0, %1);
add(%1, %2)
}
"""
mod = tvm.parser.fromtext(mod_txt)
expected_txt = """
def @collage_example_target_hook_nn_relu_nn_relu_add_add(
%FunctionVar_0: Tensor[(10, 10), float32],
Primitive=1,
Compiler="example_target_hook",
global_symbol="collage_example_target_hook_nn_relu_nn_relu_add_add") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_03: Tensor[(10, 10), float32] , Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%1 = %0(%FunctionVar_0) ;
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32] , Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
%4 = fn (%FunctionVar_04: Tensor[(10, 10), float32] , %FunctionVar_11: Tensor[(10, 10), float32] , Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_04, %FunctionVar_11)
};
%5 = %4(%1, %3);
%6 = fn (%FunctionVar_01: Tensor[(10, 10), float32] , %FunctionVar_1: Tensor[(10, 10), float32] , Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_01, %FunctionVar_1)
};
%6(%3, %5)
}
def @main(%x: Tensor[(10, 10), float32] ) -> Tensor[(10, 10), float32] {
@collage_example_target_hook_nn_relu_nn_relu_add_add(%x)
}
"""
expected_mod = tvm.parser.fromtext(expected_txt)
targets = [
tvm.target.Target("llvm"),
tvm.target.Target("example_target_hook"),
]
cost_estimator = MockCostEstimator(
{
"llvm": 2,
"example_target_hook": 1,
},
max_estimates=2,
)
run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=4, byoc_max_depth=4)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import relay
from tvm.relay import transform
def run_opt_pass(expr, opt_pass):
"runs the opt_pass on the expr of a function the function"
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = tvm.relay.transform.InferType()(mod)
mod = opt_pass(mod)
return mod["main"]
def test_combine_parallel_batch_matmul():
"""Simple testcase."""
def before(x, w1, w2, w3):
args = [x, w1, w2, w3]
y1 = relay.nn.batch_matmul(x, w1)
y2 = relay.nn.batch_matmul(x, w2)
y3 = relay.nn.batch_matmul(x, w3)
y = relay.Tuple((y1, y2, y3))
return relay.Function(args, y)
def expected(x, w1, w2, w3):
s1 = w1.type_annotation.shape[1]
s2 = w2.type_annotation.shape[1]
s3 = w3.type_annotation.shape[1]
args = [x, w1, w2, w3]
w = relay.concatenate((w1, w2, w3), axis=1)
y = relay.nn.batch_matmul(x, w)
y1 = relay.strided_slice(
y, begin=[0, 0, 0], end=[-1, -1, s1], strides=[1, 1, 1], slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=[0, 0, s1], end=[-1, -1, s2], strides=[1, 1, 1], slice_mode="size"
)
y3 = relay.strided_slice(
y, begin=[0, 0, s1 + s2], end=[-1, -1, s3], strides=[1, 1, 1], slice_mode="size"
)
y = relay.Tuple((y1, y2, y3))
return relay.Function(args, y)
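# CombineParallelBatchMatmul concatenates w1/w2/w3 along the output (J) axis, runs a
# single batch_matmul, and slices the result back into the three original outputs,
# which is the rewrite expected() spells out above.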
def check(b, i, j, k):
x = relay.var("x", shape=(b, i, k))
w1 = relay.var("w1", shape=(b, j, k))
w2 = relay.var("w2", shape=(b, j, k))
w3 = relay.var("w3", shape=(b, j, k))
y_before = before(x, w1, w2, w3)
y = run_opt_pass(y_before, transform.CombineParallelBatchMatmul(min_num_branches=2))
y_expected = expected(x, w1, w2, w3)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(2, 3, 5, 4)
check(1, 100, 200, 300)
def test_combine_parallel_batch_matmul_biasadd():
"""S |
imple testcase with bias"""
def before(x, w1, w2, w3, b1, b2, b3):
args = [x, w1, w2, w3, b1, b2, b3]
y1 = relay.nn.batch_matmul(x, w1)
y2 = relay.nn.batch_matmul(x, w2)
y3 = relay.nn.batch_matmul(x, w3)
y1 = relay.add(y1, b1)
y2 = relay.add(y2, b2)
y3 = relay.add(y3, b3)
y = relay.Tuple((y1, y2, y3))
return relay.Function(args, y)
def expected(x, w1, w2, w3, b1, b2, b3):
s1 = w1.type_annotation.shape[1]
s2 = w2.type_annotation.shape[1]
s3 = w3.type_annotation.shape[1]
args = [x, w1, w2, w3, b1, b2, b3]
w = relay.concatenate((w1, w2, w3), axis=1)
b = relay.concatenate((b1, b2, b3), axis=-1)
y = relay.nn.batch_matmul(x, w)
y = relay.add(y, b)
y1 = relay.strided_slice(
y, begin=[0, 0, 0], end=[-1, -1, s1], strides=[1, 1, 1], slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=[0, 0, s1], end=[-1, -1, s2], strides=[1, 1, 1], slice_mode="size"
)
y3 = relay.strided_slice(
y, begin=[0, 0, s1 + s2], end=[-1, -1, s3], strides=[1, 1, 1], slice_mode="size"
)
y = relay.Tuple((y1, y2, y3))
return relay.Function(args, y)
def check(b, i, j, k):
x = relay.var("x", shape=(b, i, k))
w1 = relay.var("w1", shape=(b, j, k))
w2 = relay.var("w2", shape=(b, j, k))
w3 = relay.var("w3", shape=(b, j, k))
b1 = relay.var("b1", shape=(j,))
b2 = relay.var("b2", shape=(j,))
b3 = relay.var("b3", shape=(j,))
y_before = before(x, w1, w2, w3, b1, b2, b3)
y = run_opt_pass(y_before, transform.CombineParallelBatchMatmul(min_num_branches=2))
y_expected = expected(x, w1, w2, w3, b1, b2, b3)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(2, 3, 5, 4)
check(1, 100, 200, 300)
if __name__ == "__main__":
    test_combine_parallel_batch_matmul()
    test_combine_parallel_batch_matmul_biasadd()
import tvm
from tvm |
import relay
from tvm.relay |
import transform
def run_combine_parallel(expr, min_num_branches=3):
mod = tvm.IRModule.from_expr(expr)
mod = transform.CombineParallelConv2D(min_num_branches)(mod)
return mod["main"]
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = tvm.relay.transform.InferType()(mod)
mod = opt_pass(mod)
return mod["main"]
def test_combine_parallel_conv2d():
"""Simple testcase."""
def before(x, w1, w2, w3, w4):
args = [x, w1, w2, w3, w4]
y1 = relay.nn.conv2d(x, w1)
y2 = relay.nn.conv2d(x, w2)
y3 = relay.nn.conv2d(x, w3)
y4 = relay.nn.conv2d(x, w4)
y5 = relay.nn.max_pool2d(x)
y = relay.Tuple((y1, y2, y3, y4, y5))
return relay.Function(args, y)
def expected(x, w1, w2, w3, w4, channels1, channels2, channels3, channels4):
args = [x, w1, w2, w3, w4]
w = relay.concatenate((w1, w2, w4), axis=0)
y = relay.nn.conv2d(x, w, channels=channels1 + channels2 + channels4)
y1 = relay.strided_slice(
y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
)
y3 = relay.nn.conv2d(x, w3)
y4 = relay.strided_slice(
y,
begin=[0, channels1 + channels2],
end=[-1, channels4],
strides=[1, 1],
slice_mode="size",
)
y5 = relay.nn.max_pool2d(x)
y = relay.Tuple((y1, y2, y3, y4, y5))
return relay.Function(args, y)
def check(x_shape, channels1, channels2, channels3, channels4):
x = relay.var("x", shape=x_shape)
in_c = x_shape[1]
w1 = relay.var("w1", shape=(channels1, in_c, 1, 1))
w2 = relay.var("w2", shape=(channels2, in_c, 1, 1))
w3 = relay.var("w3", shape=(channels3, in_c, 3, 3))
w4 = relay.var("w4", |
shape=(channels4, in_c, 1, 1))
y_before = before(x, w1, w2, w3, w4)
y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
y_expected = expected(x, w1, w2, w3, w4, channels1, channels2, channels3, channels4)
y_expected = run_opt_pass(y_expected, transform.InferType())
assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
check((1, 4, 16, 16), 4, 4, 4, 4)
check((1, 4, 16, 16), 4, 8, 4, 7)
def test_combine_parallel_conv2d_scale_relu():
"""Testcase of combining conv2d + scale + relu"""
def before(x, w1, w2, scale1, scale2, bias):
args = [x, w1, w2, scale1, scale2, bias]
y1 = relay.nn.conv2d(x, w1)
y1 = relay.multiply(y1, scale1)
y1 = relay.nn.relu(y1)
y2 = relay.nn.conv2d(x, w2)
y2 = relay.multiply(y2, scale2)
y2 = relay.nn.relu(y2)
y2 = relay.add(y2, bias)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def expected(x, w1, w2, scale1, scale2, bias, channels1, channels2):
args = [x, w1, w2, scale1, scale2, bias]
w = relay.concatenate((w1, w2), axis=0)
scale = relay.concatenate((scale1, scale2), axis=0)
y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
y = relay.multiply(y, scale)
y = relay.nn.relu(y)
y1 = relay.strided_slice(
y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
)
y2 = relay.add(y2, bias)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def check(x_shape, channels1, channels2):
x = relay.var("x", shape=x_shape)
in_c = x_shape[1]
w1 = relay.var("w1", shape=(channels1, in_c, 1, 1))
w2 = relay.var("w2", shape=(channels2, in_c, 1, 1))
scale1 = relay.var("scale1", shape=(channels1, 1, 1))
        scale2 = relay.var("scale2", shape=(channels2, 1, 1))
bias = relay.var("bias", shape=(channels2, 1, 1))
y_before = before(x, w1, w2, scale1, scale2, bias)
y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
y_expected = expected(x, w1, w2, scale1, scale2, bias, channels1, channels2)
y_expected = run_opt_pass(y_expected, transform.InferType())
assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
check((1, 4, 16, 16), 4, 8)
def test_combine_parallel_conv2d_scale():
"""Testcase of un-combinable scale"""
def before(x, w1, w2, scale1, scale2):
args = [x, w1, w2, scale1, scale2]
y1 = relay.nn.conv2d(x, w1)
y1 = relay.multiply(y1, scale1)
y2 = relay.nn.conv2d(x, w2)
y2 = relay.multiply(y2, scale2)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def expected(x, w1, w2, scale1, scale2, channels1, channels2):
args = [x, w1, w2, scale1, scale2]
w = relay.concatenate((w1, w2), axis=0)
y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
y1 = relay.strided_slice(
y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
)
y1 = relay.multiply(y1, scale1)
y2 = relay.multiply(y2, scale2)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def check(x_shape, channels1, channels2):
x = relay.var("x", shape=x_shape)
in_c = x_shape[1]
w1 = relay.var("w1", shape=(channels1, in_c, 1, 1))
w2 = relay.var("w2", shape=(channels2, in_c, 1, 1))
scale1 = relay.var("scale1", shape=(1,))
scale2 = relay.var("scale2", shape=(1,))
y_before = before(x, w1, w2, scale1, scale2)
y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
        y_expected = expected(x, w1, w2, scale1, scale2, channels1, channels2)
y_expected = run_opt_pass(y_expected, transform.InferType())
assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
check((1, 4, 16, 16), 4, 8)
def test_combine_parallel_conv2d_multiple_blocks():
def before(x, w, repeat):
args = [x, w]
y = x
for i in range(repeat):
y1 = relay.nn.conv2d(y, w)
y2 = relay.nn.conv2d(y, w)
y = relay.concatenate((y1, y2), axis=1)
return relay.Function(args, y)
def expected(x, w, channels, repeat):
args = [x, w]
y = x
for i in range(repeat):
w_concat = relay.concatenate((w, w), axis=0)
y = relay.nn.conv2d(y, w_concat, channels=channels * 2)
y1 = relay.strided_slice(
y, begin=[0, 0], end=[-1, channels], strides=[1, 1], slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=[0, channels], end=[-1, channels], strides=[1, 1], slice_mode="size"
)
y = relay.concatenate((y1, y2), axis=1)
return relay.Function(args, y)
def check(x_shape, repeat):
x = relay.var("x", shape=x_shape)
in_c = x_shape[1]
out_c = in_c
w = relay.var("w", shape=(out_c, in_c, 1, 1))
y_before = before(x, w, repeat)
y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
y_expected = expected(x, w, out_c, repeat)
y_expected = run_opt_pass(y_expected, transform.InferType())
assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
check((1, 4, 16, 16), 4)
if __name__ == "__main__":
test_combine_parallel_conv2d()
test_combine_parallel_conv2d_scale_relu()
test_combine_parallel_conv2d_scale()
    test_combine_parallel_conv2d_multiple_blocks()
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
def run_combine_parallel(expr, min_num_branches=3, to_batch=True):
mod = tvm.IRModule.from_expr(expr)
mod = transform.CombineParallelDense(min_num_branches, to_batch)(mod)
return mod["main"]
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = tvm.relay.transform.InferType()(mod)
mod = opt_pass(mod)
return mod["main"]
def test_combine_parallel_dense():
"""Simple testcase. One dense cannot be combined due to shape mismatch"""
def before(x, w1, w2, w3, w4):
args = [x, w1, w2, w3, w4]
y1 = relay.nn.dense(x, w1)
y2 = relay.nn.dense(x, w2)
y3 = relay.nn.dense(x, w3)
y4 = relay.nn.dense(x, w4)
y = relay.Tuple((y1, y2, y3, y4))
return relay.Function(args, y)
def expected(x, w1, w2, w3, w4):
args = [x, w1, w2, w3, w4]
x_stacked = relay.stack((x, x, x), axis=0)
w = relay.stack((w1, w2, w4), axis=0)
y = relay.nn.batch_matmul(x_stacked, w)
(y1, y2, y4) = relay.split(y, 3)
y1 = relay.squeeze(y1, [0])
y2 = relay.squeeze(y2, [0])
y4 = relay.squeeze(y4, [0])
y3 = relay.nn.dense(x, w3)
y = relay.Tuple((y1, y2, y3, y4))
return relay.Function(args, y)
def check(i, j, k):
x = relay.var("x", shape=(i, k))
w1 = relay.var("w1", shape=(j, k))
w2 = relay.var("w2", shape=(j, k))
w3 = relay.var("w3", shape=(j + 1, k))
w4 = relay.var("w4", shape=(j, k))
y_before = before(x, w1, w2, w3, w4)
y = run_opt_pass(y_before, transform.CombineParallelDense(min_num_branches=2))
y_expected = expected(x, w1, w2, w3, w4)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(3, 5, 4)
check(100, 200, 300)
def test_combine_parallel_dense_biasadd():
"""Testcase of combinin |
g dense + 1d biasadd"""
def before(x, w1, w2, b1, b2):
args = [x, w1, w2, b1, b2]
y1 = relay.nn.dense(x, w1)
y2 = relay.nn.dense(x, w2)
y1 = relay.add(y1, b1)
y2 = relay.add(y2, b2)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def expected(x, w1, w2, b1, b2, is_2d_bias):
args = [x, w1, w2, b1, b2]
x_stacked = relay.stack((x, x), axis=0)
w = relay.stack((w1, w2), axis=0)
y = relay.nn.batch_matmul(x_stacked, w)
if not is_2d_bias:
b1 = relay.expand_dims(b1, 0)
b2 = relay.expand_dims(b2, 0)
b = relay.stack((b1, b2), axis=0)
y = relay.add(y, b)
(y1, y2) = relay.split(y, 2)
y1 = relay.squeeze(y1, [0])
y2 = relay.squeeze(y2, [0])
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def check(i, j, k, is_2d_bias):
x = relay.var("x", shape=(i, k))
w1 = relay.var("w1", shape=(j, k))
w2 = relay.var("w2", shape=(j, k))
if is_2d_bias:
b1 = relay.var("b1", shape=(i, j))
b2 = relay.var("b2", shape=(i, j))
else:
b1 = relay.var("b1", shape=(j,))
b2 = relay.var("b2", shape=(j,))
y_before = before(x, w1, w2, b1, b2)
y = run_opt_pass(y_before, transform.CombineParallelDense(min_num_branches=2))
y_expected = expected(x, w1, w2, b1, b2, is_2d_bias)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(3, 5, 4, False)
check(100, 200, 300, False)
check(3, 5, 4, True)
check(100, 200, 300, True)
def test_combine_parallel_dense_biasadd_scale_reshape():
"""Testcase of combining dense + 1d biasadd + multiply with non-fused reshape"""
def before(x, w1, w2, b1, b2, scale1, scale2, newshape):
args = [x, w1, w2, b1, b2, scale1, scale2]
y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
y1 = relay.add(y1, b1)
y2 = relay.add(y2, b2)
y1 = relay.multiply(y1, scale1)
y2 = relay.multiply(y2, scale2)
y1 = relay.reshape(y1, newshape=newshape)
y2 = relay.reshape(y2, newshape=newshape)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def expected(x, w1, w2, b1, b2, scale1, scale2, newshape):
args = [x, w1, w2, b1, b2, scale1, scale2]
x_stacked = relay.stack((x, x), axis=0)
w = relay.stack((w1, w2), axis=0)
y = relay.nn.batch_matmul(x_stacked, w)
b1 = relay.expand_dims(b1, 0)
b2 = relay.expand_dims(b2, 0)
b = relay.stack((b1, b2), axis=0)
y = relay.add(y, b)
scale1 = relay.expand_dims(scale1, 0)
scale2 = relay.expand_dims(scale2, 0)
scale = relay.stack((scale1, scale2), axis=0)
y = relay.multiply(y, scale)
(y1, y2) = relay.split(y, 2)
y1 = relay.squeeze(y1, [0])
y2 = relay.squeeze(y2, [0])
y1 = relay.reshape(y1, newshape=newshape)
y2 = relay.reshape(y2, newshape=newshape)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def check(i, j, k, scale1, scale2, newshape):
x = relay.var("x", shape=(i, k))
w1 = relay.var("w1", shape=(j, k))
w2 = relay.var("w2", shape=(j, k))
b1 = relay.var("b1", shape=(j,))
b2 = relay.var("b2", shape=(j,))
scale1 = relay.var("scale1", shape=(1,))
scale2 = relay.var("scale2", shape=(1,))
y_before = before(x, w1, w2, b1, b2, scale1, scale2, newshape)
y = run_opt_pass(y_before, transform.CombineParallelDense(min_num_branches=2))
y_expected = expected(x, w1, w2, b1, b2, scale1, scale2, newshape)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(3, 5, 4, 0.5, 0.25, (1, 1, 15))
check(100, 200, 300, 0.5, 0.25, (1, 1, 20000))
def test_combine_parallel_dense_flat():
    """Simple testcase. Dense branches with different output dims can all be combined"""
def before(x, w1, w2, w3):
args = [x, w1, w2, w3]
y1 = relay.nn.dense(x, w1)
y2 = relay.nn.dense(x, w2)
y3 = relay.nn.dense(x, w3)
y = relay.Tuple((y1, y2, y3))
return relay.Function(args, y)
def expected(x, w1, w2, w3, j):
args = [x, w1, w2, w3]
w_stacked = relay.concatenate((w1, w2, w3), axis=0)
y = relay.nn.dense(x, w_stacked, units=6 * j)
strides = [1, 1]
y1 = relay.strided_slice(y, begin=[0, 0], end=[-1, j], strides=strides, slice_mode="size")
y2 = relay.strided_slice(
y, begin=[0, j], end=[-1, 2 * j], strides=strides, slice_mode="size"
)
y3 = relay.strided_slice(
y, begin=[0, 3 * j], end=[-1, 3 * j], strides=strides, slice_mode="size"
)
y = relay.Tuple((y1, y2, y3))
return relay.Function(args, y)
def check(i, j, k):
x = relay.var("x", shape=(i, k))
w1 = relay.var("w1", shape=(j, k))
w2 = relay.var("w2", shape=(2 * j, k))
w3 = relay.var("w3", shape=(3 * j, k))
y_before = before(x, w1, w2, w3)
combine_pass = transform.CombineParallelDense(min_num_branches=3, to_batch=False)
y = run_opt_pass(y_before, combine_pass)
y_expected = expected(x, w1, w2, w3, j)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(3, 5, 4)
check(100, 200, 300)
def test_combine_parallel_dense_flat_biasadd():
"""Testcase of combining dense + 1d biasadd with different out dims"""
def before(x, w1, w2, b1, b2):
args = [x, w1, w2, b1, b2]
y1 = relay.nn.dense(x, w1)
y2 = relay.nn.dense(x, w2)
y1 = relay.add(y1, b1)
y2 = relay.add(y2, b2)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, j, bias_shape1, bias_shape2):
args = [x, w1, w2, b1, b2]
w_stacked = relay.concatenate((w1, w2), axis=0)
y = relay.nn.dense(x, w_stacked, units=3 * j)
n_out_dims = max(len(bias_shape1), 2)
if len(bias_shape1) == 0:
b1 = relay.repeat(relay.expand_dims(b1, -1), j, 0)
elif bias_shape1[-1] == 1:
b1 = relay.repeat(b1, j, len(bias_shape1) - 1)
if len(bias_shape2) == 0:
b2 = relay.repeat(relay.expand_dims(b2, -1), 2 * j, 0)
elif bias_shape2[-1] == 1:
b2 = relay.repeat(b2, 2 * j, len(bias_shape2) - 1)
b = relay.concatenate((b1, b2), axis=max(0, len(bias_shape1) - 1))
y = relay.add(y, b)
begin = [0 for _ in range(n_out_dims - 1)]
end = [-1 for _ in range(n_out_dims - 1)]
strides = [1 for _ in range(n_out_dims)]
y1 = relay.strided_slice(
y, begin=begin + [0], end=end + [j], strides=strides, slice_mode="size"
)
y2 = relay.strided_slice(
y, begin=begin + [j], end=end + [2 * j], strides=strides, slice_mode="size"
)
return relay.Function(args, relay.Tuple((y1, y2)))
def check(i, j, k, bias_shape1, bias_shape2):
x = relay.var("x", shape=(i, k))
w1 = relay.var("w1", shape=(j, k))
w2 = relay.var("w2", shape=(2 * j, k))
b1 = relay.var("b1", shape=bias_shape1)
b2 = relay.var("b2", shape=bias_shape2)
y_before = before(x, w1, w2, b1, b2)
combine_pass = transform.CombineParallelDense(min_num_branches=2, to_batch=False)
y = run_opt_pass(y_before, combine_pass)
y_expected = expected(x, w1, w2, b1, b2, j, bias_shape1, bias_shape2)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(3, 5, 4, (), ())
check(3, 5, 4, (1,), (1,))
check(3, 5, 4, (5,), (1,))
check(3, 5, 4, (1,), (10,))
check(3, 5, 4, (3, 1), (3, 1))
check(3, 5, 4, (3, 5), (3, 10))
    check(3, 5, 4, (3, 1), (3, 10))
check(3, 5, 4, (3, 5), (3, 1))
check(3, 5, 4, (9, 3, 5), (9, 3, 10))
check(3, 5, 4, (9, 3, 5), (9, 3, 1))
check(3, 5, 4, (9, 3, 1), (9, 3, 10))
def test_combine_parallel_dense_flat_biasadd_scale_reshape():
"""Testcase of combining dense with different out dims
following bias add, scale, reshape ops
"""
def before(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2):
args = [x, w1, w2, b1, b2, scale1, scale2]
y1 = relay.nn.dense(x, w1)
y2 = relay.nn.dense(x, w2)
y1 = relay.add(y1, b1)
y2 = relay.add(y2, b2)
y1 = relay.multiply(y1, scale1)
y2 = relay.multiply(y2, scale2)
y1 = relay.reshape(y1, newshape=newshape1)
y2 = relay.reshape(y2, newshape=newshape2)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def expected(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2, j):
args = [x, w1, w2, b1, b2, scale1, scale2]
w_stacked = relay.concatenate((w1, w2), axis=0)
y = relay.nn.dense(x, w_stacked, units=3 * j)
b = relay.concatenate((b1, b2), axis=0)
y = relay.add(y, b)
scale1 = relay.repeat(scale1, j, 0)
scale2 = relay.repeat(scale2, 2 * j, 0)
scale = relay.concatenate((scale1, scale2), axis=0)
y = relay.multiply(y, scale)
strides = [1, 1]
y1 = relay.strided_slice(y, begin=[0, 0], end=[-1, j], strides=strides, slice_mode="size")
y2 = relay.strided_slice(
y, begin=[0, j], end=[-1, 2 * j], strides=strides, slice_mode="size"
)
y1 = relay.reshape(y1, newshape=newshape1)
y2 = relay.reshape(y2, newshape=newshape2)
y = relay.Tuple((y1, y2))
return relay.Function(args, y)
def check(i, j, k, scale1, scale2, newshape1, newshape2):
x = relay.var("x", shape=(i, k))
w1 = relay.var("w1", shape=(j, k))
w2 = relay.var("w2", shape=(2 * j, k))
b1 = relay.var("b1", shape=(j,))
        b2 = relay.var("b2", shape=(2 * j,))
scale1 = relay.var("scale1", shape=(1,))
scale2 = relay.var("scale2", shape=(1,))
y_before = before(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2)
combine_pass = transform.CombineParallelDense(min_num_branches=2, to_batch=False)
y = run_opt_pass(y_before, combine_pass)
y_expected = expected(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2, j)
y_expected = run_opt_pass(y_expected, transform.InferType())
tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
check(3, 5, 4, 0.5, 0.25, (1, 1, 15), (1, 1, 30))
check(100, 200, 300, 0.5, 0.25, (1, 1, 20000), (1, 1, 40000))
if __name__ == "__main__":
test_combine_parallel_dense()
test_combine_parallel_dense_biasadd()
test_combine_parallel_dense_biasadd_scale_reshape()
test_combine_parallel_dense_flat()
test_combine_parallel_dense_flat_biasadd()
    test_combine_parallel_dense_flat_biasadd_scale_reshape()
"""Test alter op layout pass""" |
import pytest
import tvm
from tvm import relay, te
from tvm.relay import analysis, transform
from tvm.relay.op import op as reg
from tvm.relay.op import register_alter_op_layout
from tvm.relay.transform.infer_layout_utils import InferCorrectLayoutOutput
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
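# Note (based on how the tests below use it): ConvertLayout takes a mapping
# from op name to desired layouts, e.g. {"nn.conv2d": ["NCHW", "default"]}
# means "convert nn.conv2d to NCHW data layout and derive the kernel layout",
# while an explicit entry such as ["NCHW", "OIHW"] pins the kernel layout too.
# The pass inserts layout_transform ops around converted ops and propagates
# the new layout through layout-agnostic consumers. A minimal invocation
# sketch, mirroring run_opt_pass above:
#
#   seq = tvm.transform.Sequential(
#       [transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]})]
#   )
#   with tvm.transform.PassContext(opt_level=3):
#       mod = seq(mod)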
def test_no_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
return before()
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_binary_no_convert_layout():
def before():
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(1, 2))
return relay.Function(
[x, y],
relay.qnn.op.add(
x,
y,
lhs_scale=relay.const(0.0156863, "float32"),
lhs_zero_point=relay.const(127, "int32"),
rhs_scale=relay.const(0.0117647, "float32"),
rhs_zero_point=relay.const(85, "int32"),
output_scale=relay.const(0.0235294, "float32"),
output_zero_point=relay.const(128, "int32"),
),
)
def expected():
return before()
a = before()
a = run_opt_pass(a, transform.ConvertLayout({}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_nhwc_convert_layout():
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NHWC", "NCHW" |
)
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_transpose_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d_transpose(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "IOHW")
y = relay.nn.conv2d_transpose(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d_transpose": ["NCHW", "IOHW"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_bias_pool_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.bias_add(y, bias, axis=3)
        y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC")
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
bias = relay.layout_transform(bias, "NHWC", "NCHW")
y = relay.add(y, bias)
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2))
y = relay.cast(y, "int32")
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
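# Note (inferred from the expected() function below): when an op such as
# nn.max_pool2d is also given a desired layout in the ConvertLayout mapping,
# the pool is expected to stay in that layout (with out_layout set explicitly)
# and a layout_transform is inserted between the converted conv2d chain and
# the pool, rather than the pool being converted along with the convolution.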
def test_conv_bias_pool_uses_specified_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.bias_add(y, bias, axis=3)
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC")
y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
bias = relay.layout_transform(bias, "NHWC", "NCHW")
y = relay.add(y, bias)
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC", out_layout="NHWC")
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(
a,
transform.ConvertLayout({"nn.conv2d": ["NCHW", "OIHW"], "nn.max_pool2d": ["NHWC"]}),
)
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_conv_concat_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
ret = relay.concatenate([y, y1], axis=3)
        y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(y, weight1, channels=64, kernel_size=(3, 3), padding=(1, 1))
y1 = relay.nn.conv2d(y, weight2, channels=64, kernel_size=(3, 3), padding=(1, 1))
ret = relay.concatenate([y, y1], axis=1)
ret = relay.layout_transform(ret, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_deformable_conv_bias_pool_convert_layout():
def before(N, CI, H, W, CO, KH, KW, layout):
if layout == "NCHW":
data_shape = (N, CI, H, W)
weight_shape = (CO, CI, KH, KW)
kernel_layout = "OIHW"
else:
data_shape = (N, H, W, CI)
weight_shape = (KH, KW, CI, CO)
kernel_layout = "HWIO"
bias_shape = (CO,)
data = relay.var("data", shape=data_shape, dtype="float32")
offset = relay.var("offset")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout,
kernel_layout=kernel_layout,
)
y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout):
layout_map = {"src": {}, "dst": {}}
if src_layout == "NCHW":
nchw = layout_map["src"]
nhwc = layout_map["dst"]
else:
nchw = layout_map["dst"]
nhwc = layout_map["src"]
nchw["data_layout"] = "NCHW"
nchw["data_shape"] = (N, CI, H, W)
nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
nchw["weight_shape"] = (CO, CI, KH, KW)
nchw["kernel_layout"] = "OIHW"
nhwc["data_layout"] = "NHWC"
nhwc["data_shape"] = (N, H, W, CI)
nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
nhwc["weight_shape"] = (KH, KW, CI, CO)
nhwc["kernel_layout"] = "HWIO"
bias_shape = (CO,)
data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
data = relay.layout_transform(
data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
offset = relay.layout_transform(
offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
weight = relay.layout_transform(
weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
)
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout_map["dst"]["data_layout"],
kernel_layout=layout_map["dst"]["kernel_layout"],
)
if layout_map["src"]["data_layout"] == "N |
HWC":
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
else:
bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
bias = relay.expand_dims(bias, axis=0)
bias = relay.layout_transform(
bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
y = relay.add(y, bias)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout_map["dst"]["data_layout"])
y = relay.cast(y, "int32")
y = relay.layout_transform(
y, layout_map["dst"]["data_layout"], layout_map["src"]["data_layout"]
)
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NCHW", "default"]}))
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW"), transform.InferType()
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NHWC", "default"]}))
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC"), transform.InferType()
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_deformable_conv_bias_pool_uses_specified_convert_layout():
def before(N, CI, H, W, CO, KH, KW, layout):
if layout == "NCHW":
data_shape = (N, CI, H, W)
weight_shape = (CO, CI, KH, KW)
kernel_layout = "OIHW"
else:
data_shape = (N, H, W, CI)
weight_shape = (KH, KW, CI, CO)
kernel_layout = "HWIO"
bias_shape = (CO,)
data = relay.var("data", shape=data_shape, dtype="float32")
offset = relay.var("offset")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout,
kernel_layout=kernel_layout,
)
y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout, max_pool_layout=None):
layout_map = {"src": {}, "dst": {}}
if src_layout == "NCHW":
nchw = layout_map["src"]
nhwc = layout_map["dst"]
else:
nchw = layout_map["dst"]
nhwc = layout_map["src"]
nchw["data_layout"] = "NCHW"
nchw["data_shape"] = (N, CI, H, W)
nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
nchw["weight_shape"] = (CO, CI, KH, KW)
nchw["kernel_layout"] = "OIHW"
nhwc["data_layout"] = "NHWC"
nhwc["data_shape"] = (N, H, W, CI)
nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
nhwc["weight_shape"] = (KH, KW, CI, CO)
nhwc["kernel_layout"] = "HWIO"
bias_shape = (CO,)
data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
data = relay.layout_transform(
data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
offset = relay.layout_transform(
offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
weight = relay.layout_transform(
weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
)
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout_map["dst"]["data_layout"],
kernel_layout=layout_map["dst"]["kernel_layout"],
)
if layout_map["src"]["data_layout"] == "NHWC":
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
else:
bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
bias = relay.expand_dims(bias, axis=0)
bias = relay.layout_transform(
bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
y = relay.add(y, bias)
y = relay.nn.relu(y)
if max_pool_layout != layout_map["dst"]["data_layout"]:
y = relay.layout_transform(y, layout_map["dst"]["data_layout"], max_pool_layout)
y = relay.nn.max_pool2d(
y, pool_size=(2, 2), layout=max_pool_layout, out_layout=max_pool_layout
)
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
a = run_opt_pass(
a,
transform.ConvertLayout(
{"nn.deformable_conv2d": ["NCHW", "default"], "nn.max_pool2d": ["NHWC"]}
),
)
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW", max_pool_layout="NHWC"),
transform.InferType(),
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
a = run_opt_pass(
a,
transform.ConvertLayout(
{"nn.deformable_conv2d": ["NHWC", "default"], "nn.max_pool2d": ["NCHW"]}
),
)
    b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC", max_pool_layout="NCHW"),
transform.InferType(),
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_dual_path_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(3, 3, 32, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(
y,
weight2,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.batch_flatten(y)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(3, 3, 32, 32))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1))
y1 = relay.nn.relu(y1)
y1 = relay.layout_transform(y1, "NCHW", "NHWC")
y2 = relay.layout_transform(y, "NCHW", "NHWC")
y2 = relay.nn.batch_flatten(y2)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_bn_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
gamma = relay.var("gamma")
beta = relay.var("beta")
mean = relay.var("mean")
variance = relay.var("variance")
y, _, _ = relay.nn.batch_norm(y, gamma, beta, mean, variance, axis=3)
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
has_lt = list()
find_op = lambda x: has_lt.append(
isinstance(x, tvm.relay.expr.Call)
and x.op.name == "layout_transform"
and x.attrs.src_layout == "NCHW"
and x.attrs.dst_layout == "NHWC"
)
relay.analysis.post_order_visit(a, find_op)
has_lt = list(filter(lambda x: x, has_lt))
assert len(has_lt) == 1
def test_slice_like_convert_layout():
def verify_slice_like(after, expected_axes):
has_expected = list()
checker = lambda x: has_expected.append(
isinstance(x, tvm.relay.expr.Call)
and x.op.name == "slice_like"
and str(x.attrs.axes) == str(expected_axes)
)
relay.analysis.post_order_visit(after, checker)
assert any(has_expected)
def func_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC", |
kernel_layout="HWIO",
)
out = relay.slice_like(y, y, axes=[1, 2])
return relay.Function(analysis.free_vars(out), out)
after = run_opt_pass(func_nhwc(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
verify_slice_like(after, [2, 3])
def func_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
out = relay.slice_like(y, y, axes=[2, 3])
return relay.Function(analysis.free_vars(out), out)
after = run_opt_pass(func_nchw(), transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
verify_slice_like(after, [1, 2])
def func_vars():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
z = relay.var("y", shape=(1, 56, 56, 32))
out = relay.slice_like(y, z, axes=[1, 2])
return relay.Function(analysis.free_vars(out), out)
after = run_opt_pass(func_vars(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
verify_slice_like(after, [1, 2])
def test_transpose_convert_layout():
def verify_transpose(after, expected_axes, expected_transform_cnt):
has_expected = list()
checker = lambda x: has_expected.append(
isinstance(x, tvm.relay.expr.Call)
and x.op.name == "transpose"
and str(x.attrs.axes) == str(expected_axes)
)
relay.analysis.post_order_visit(after, checker)
assert any(has_expected), after
is_transform = list()
        checker = lambda x: is_transform.append(
1 if isinstance(x, tvm.relay.expr.Call) and x.op.name == "layout_transform" else 0
)
relay.analysis.post_order_visit(after, checker)
assert (
sum(is_transform) == expected_transform_cnt
), "Expected %s layout_transform, but get\n%s" % (expected_transform_cnt, after)
def nhwc_to_nchw():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
z = relay.var("z", shape=(56, 56, 32))
out = relay.add(y, z)
out = relay.transpose(out, axes=[0, 3, 1, 2])
out = relay.nn.batch_flatten(out)
func = relay.Function(analysis.free_vars(out), out)
return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
verify_transpose(nhwc_to_nchw(), [0, 1, 2, 3], 3)
def nchw_to_nhwc():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
z = relay.var("z", shape=(32, 56, 56))
out = relay.add(y, z)
out = relay.transpose(out, axes=[0, 2, -1, 1])
out = relay.nn.batch_flatten(out)
func = relay.Function(analysis.free_vars(out), out)
return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
verify_transpose(nchw_to_nhwc(), [0, 1, 2, 3], 3)
def default_axes():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
y = relay.nn.conv2d(
x,
            weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
z = relay.var("z", shape=(32, 56, 56))
out = relay.add(y, z)
out = relay.transpose(out)
func = relay.Function(analysis.free_vars(out), out)
return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
verify_transpose(default_axes(), [2, 1, 3, 0], 3)
def test_resnet_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(
x, weight2, channels=32, kernel_size=(1, 1), data_layout="NHWC", kernel_layout="HWIO"
)
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y, layout="NHWC")
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_resnet_pool_uses_specified_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(
x, weight2, channels=32, kernel_size=(1, 1), data_layout="NHWC", kernel_layout="HWIO"
)
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y, layout="NHWC")
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.nn.global_max_pool2d(y, layout="NHWC", out_layout="NHWC")
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(
a,
transform.ConvertLayout(
{"nn.conv2d": ["NCHW", "default"], "nn.global_max_pool2d": ["NHWC"]}
),
)
    b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_scalar_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.add(y, relay.const(1, "float32"))
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.add(y, relay.const(1.0, "float32"))
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_ln_convert_layout():
"""Check that layout transforms are propagated through ln."""
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
y = relay.nn.layer_norm(y, gamma, beta, axis=3)
        y = relay.Function(analysis.free_vars(y), y)