import tvm
import tvm.testing
from tvm import topi
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
@tvm.testing.uses_gpu
def test_legalize_conv2d_NHWC():
"""test legalize NHWC conv2d to enable tensorcore"""
def _test_legalize_conv2d(data_shape, kernel_shape, pad_shape, dtype, do_pad=True):
out_channel = kernel_shape[3]
out_shape = list(data_shape)
out_shape[3] = out_channel
db, di, do = pad_shape
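# pad_shape = (db, di, do): rows appended to the batch axis of the data, the shared
# input-channel axis, and the output-channel axis of the weight, so the padded shapes
# satisfy the tensorcore alignment constraints; expected() below applies exactly this
# padding and then slices the result back to the original out_shape.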
def before():
x = relay.var("x", shape=data_shape, dtype=dtype)
weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv2d(
x,
weight,
channels=out_channel,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.Function([x, weight], y)
return y
def legalize_conv2d(attrs, inputs, types):
with tvm.target.Target("cuda"):
return topi.nn.conv2d_legalize(attrs, inputs, types)
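# Legalization is target-dependent: entering a CUDA target scope makes
# topi.nn.conv2d_legalize dispatch to the CUDA implementation, which decides
# whether padding is needed to reach tensorcore-friendly shapes.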
def expected():
if not do_pad:
return before()
x = relay.var("x", shape=data_shape, dtype=dtype)
if db or di:
x_pad = relay.nn.pad(x, pad_width=((0, db), (0, 0), (0, 0), (0, di)))
else:
x_pad = x
weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
if di or do:
weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, 0), (0, di), (0, do)))
else:
weight_pad = weight
y_pad = relay.nn.conv2d(
x_pad,
weight=weight_pad,
channels=out_channel + do,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
if db or do:
y = relay.strided_slice(y_pad, begin=[0, 0, 0, 0], end=out_shape)
else:
y = y_pad
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
for dtype in ["float16", "int8", "int4"]:
_test_legalize_conv2d((7, 16, 16, 64), (3, 3, 64, 64), (1, 0, 0), dtype)
_test_legalize_conv2d((3, 16, 16, 64), (3, 3, 64, 64), (5, 0, 0), dtype)
_test_legalize_conv2d((2, 16, 16, 64), (3, 3, 64, 64), (0, 0, 0), dtype, False)
_test_legalize_conv2d((8, 16, 16, 63), (3, 3, 63, 64), (0, 1, 0), dtype)
_test_legalize_conv2d((8, 16, 16, 33), (3, 3, 33, 64), (0, 15, 0), dtype)
_test_legalize_conv2d((8, 16, 16, 13), (3, 3, 13, 64), (0, 3, 0), dtype)
_test_legalize_conv2d((8, 16, 16, 1), (3, 3, 1, 64), (0, 0, 0), dtype, False)
_test_legalize_conv2d((8, 16, 16, 64), (3, 3, 64, 63), (0, 0, 1), dtype)
_test_legalize_conv2d((8, 16, 16, 64), (3, 3, 64, 33), (0, 0, 31), dtype)
_test_legalize_conv2d((8, 16, 16, 64), (3, 3, 64, 1), (0, 0, 0), dtype, False)
@tvm.testing.uses_gpu
def test_legalize_conv2d_HWNC():
"""test legalize HWNC conv2d to enable tensorcore"""
def _test_legalize_conv2d(data_shape, kernel_shape, pad_shape, dtype, do_pad=True):
out_channel = kernel_shape[2]
out_shape = list(data_shape)
out_shape[3] = out_channel
db, di, do = pad_shape
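# Same (db, di, do) convention as the NHWC test, but for HWNC data and HWOI weights:
# db pads the batch axis (dim 2 of the data), di pads the input-channel axis (dim 3 of
# data and weight), and do pads the output-channel axis (dim 2 of the weight).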
def before():
x = relay.var("x", shape=data_shape, dtype=dtype)
weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv2d(
x,
weight,
channels=out_channel,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="HWNC",
kernel_layout="HWOI",
)
y = relay.Function([x, weight], y)
return y
def legalize_conv2d(attrs, inputs, types):
with tvm.target.Target("cuda"):
return topi.nn.conv2d_legalize(attrs, inputs, types)
def expected():
if not do_pad:
return before()
x = relay.var("x", shape=data_shape, dtype=dtype)
if db or di:
x_pad = relay.nn.pad(x, pad_width=((0, 0), (0, 0), (0, db), (0, di)))
else:
x_pad = x
weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
if di or do:
weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, 0), (0, do), (0, di)))
else:
weight_pad = weight
y_pad = relay.nn.conv2d(
x_pad,
weight=weight_pad,
channels=out_channel + do,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="HWNC",
kernel_layout="HWOI",
)
if db or do:
y = relay.strided_slice(y_pad, begin=[0, 0, 0, 0], end=out_shape)
else:
y = y_pad
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
_test_legalize_conv2d((16, 16, 7, 64), (3, 3, 64, 64), (1, 0, 0), "int8")
_test_legalize_conv2d((16, 16, 3, 64), (3, 3, 64, 64), (5, 0, 0), "int8")
_test_legalize_conv2d((2, 16, 16, 64), (3, 3, 64, 64), (0, 0, 0), "int8", False)
_test_legalize_conv2d((16, 16, 7, 64), (3, 3, 64, 64), (1, 0, 0), "int4")
_test_legalize_conv2d((16, 16, 3, 64), (3, 3, 64, 64), (5, 0, 0), "int4")
_test_legalize_conv2d((2, 16, 16, 64), (3, 3, 64, 64), (0, 0, 0), "int4", False)
_test_legalize_conv2d((16, 16, 8, 63), (3, 3, 64, 63), (0, 1, 0), "int8")
_test_legalize_conv2d((16, 16, 8, 33), (3, 3, 64, 33), (0, 15, 0), "int8")
_test_legalize_conv2d((16, 16, 8, 13), (3, 3, 64, 13), (0, 3, 0), "int8")
_test_legalize_conv2d((16, 16, 8, 1), (3, 3, 64, 1), (0, 0, 0), "int8", False)
_test_legalize_conv2d((16, 16, 8, 63), (3, 3, 64, 63), (0, 1, 0), "int4")
_test_legalize_conv2d((16, 16, 8, 33), (3, 3, 64, 33), (0, 31, 0), "int4")
_test_legalize_conv2d((16, 16, 8, 13), (3, 3, 64, 13), (0, 19, 0), "int4")
_test_legalize_conv2d((16, 16, 8, 1), (3, 3, 64, 1), (0, 0, 0), "int4", False)
_test_legalize_conv2d((16, 16, 8, 64), (3, 3, 63, 64), (0, 0, 1), "int8")
_test_legalize_conv2d((16, 16, 8, 64), (3, 3, 33, 64), (0, 0, 31), "int8")
_test_legalize_conv2d((16, 16, 8, 64), (3, 3, 1, 64), (0, 0, 0), "int8", False)
_test_legalize_conv2d((16, 16, 8, 64), (3, 3, 63, 64), (0, 0, 1), "int4")
_test_legalize_conv2d((16, 16, 8, 64), (3, 3, 33, 64), (0, 0, 7), "int4")
_test_legalize_conv2d((16, 16, 8, 64), (3, 3, 1, 64), (0, 0, 0), "int4", False)
@tvm.testing.uses_gpu
def test_legalize_dense():
def _test_legalize_dense(data_shape, kernel_shape, pad_shape, dtype, do_pad=True, units=None):
"""test legalize dense to enable tensorcore"""
M, K = data_shape
N, _ = kernel_shape
out_shape = (M, N)
dm, dk, dn = pad_shape
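# pad_shape = (dm, dk, dn): padding appended to the M (data rows), K (reduction) and
# N (weight rows, i.e. output columns) dimensions so the (M, K) x (N, K) dense workload
# becomes tensorcore-friendly; the output is sliced back to (M, N) afterwards.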
def before():
x = relay.var("x", shape=data_shape, dtype=dtype)
weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
y = relay.nn.dense(x, weight, units)
y = relay.Function([x, weight], y)
return y
def legalize_dense(attrs, inputs, types):
with tvm.target.Target("cuda"):
return topi.nn.dense_legalize(attrs, inputs, types)
def expected():
if not do_pad:
return before()
x = relay.var("x", shape=data_shape, dtype=dtype)
if dm or dk:
x_pad = relay.nn.pad(x, pad_width=((0, dm), (0, dk)))
else:
x_pad = x
weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
if dn or dk:
weight_pad = relay.nn.pad(weight, pad_width=((0, dn), (0, dk)))
else:
weight_pad = weight
y_pad = relay.nn.dense(x_pad, weight_pad, units=N + dn if units else None)
if dm or dn:
y = relay.strided_slice(y_pad, begin=[0, 0], end=out_shape)
else:
y = y_pad
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.dense", "FTVMLegalize", legalize_dense):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
for dtype in ["float16", "int8"]:
_test_legalize_dense((8, 16), (32, 16), (0, 0, 0), dtype, False)
_test_legalize_dense((7, 16), (32, 16), (1, 0, 0), dtype)
_test_legalize_dense((8, 15), (32, 15), (0, 1, 0), dtype)
_test_legalize_dense((8, 16), (31, 16), (0, 0, 1), dtype)
_test_legalize_dense((7, 15), (31, 15), (1, 1, 1), dtype)
_test_legalize_dense((3, 16), (32, 16), (5, 0, 0), dtype)
_test_legalize_dense((1, 16), (32, 16), (0, 0, 0), dtype, False)
_test_legalize_dense((8, 16), (30, 16), (0, 0, 2), "float16", units=30)
_test_legalize_dense((8, 32), (32, 32), (0, 0, 0), "int4", False)
_test_legalize_dense((7, 32), (32, 32), (1, 0, 0), "int4")
_test_legalize_dense((8, 31), (32, 31), (0, 1, 0), "int4")
_test_legalize_dense((8, 32), (31, 32), (0, 0, 1), "int4")
_test_legalize_dense((7, 31), (31, 31), (1, 1, 1), "int4")
_test_legalize_dense((3, 32), (32, 32), (5, 0, 0), "int4")
_test_legalize_dense((8, 16), (32, 16), (0, 16, 0), "int4")
_test_legalize_dense((2, 16), (32, 16), (0, 0, 0), "int4", False)
@tvm.testing.uses_gpu
def test_legalize_batch_matmul():
def _test_legalize_batch_matmul(data_shape, kernel_shape, pad_shape, dtype, do_pad=True):
"""test legalize dense to enable tensorcore"""
B, M, _ = data_shape
_, N, _ = kernel_shape
out_shape = (B, M, N)
dm, dk, dn = pad_shape
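# As in the dense test, (dm, dk, dn) pad the M, K and N dimensions of the batched
# matmul; the batch dimension B itself is never padded (its pad_width stays (0, 0)).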
def before():
x = relay.var("x", shape=data_shape, dtype=dtype)
weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
y = relay.nn.batch_matmul(x, weight)
y = relay.Function([x, weight], y)
return y
def legalize_batch_matmul(attrs, inputs, types):
with tvm.target.Target("cuda"):
return topi.nn.batch_matmul_legalize(attrs, inputs, types)
def expected():
if not do_pad:
return before()
x = relay.var("x", shape=data_shape, dtype=dtype)
if dm or dk:
x_pad = relay.nn.pad(x, pad_width=((0, 0), (0, dm), (0, dk)))
else:
x_pad = x
weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
if dn or dk:
weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, dn), (0, dk)))
else:
weight_pad = weight
y_pad = relay.nn.batch_matmul(
x_pad,
weight_pad,
)
if dm or dn:
y = relay.strided_slice(y_pad, begin=[0, 0, 0], end=out_shape)
else:
y = y_pad
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.batch_matmul", "FTVMLegalize", legalize_batch_matmul):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
for dtype in ["float16", "int8"]:
_test_legalize_batch_matmul((16, 8, 16), (16, 32, 16), (0, 0, 0), dtype, False)
_test_legalize_batch_matmul((16, 7, 16), (16, 32, 16), (1, 0, 0), dtype)
_test_legalize_batch_matmul((16, 8, 15), (16, 32, 15), (0, 1, 0), dtype)
_test_legalize_batch_matmul((16, 8, 16), (16, 31, 16), (0, 0, 1), dtype)
_test_legalize_batch_matmul((16, 7, 15), (16, 31, 15), (1, 1, 1), dtype)
_test_legalize_batch_matmul((16, 3, 16), (16, 32, 16), (5, 0, 0), dtype)
_test_legalize_batch_matmul((16, 2, 16), (16, 32, 16), (0, 0, 0), dtype, False)
_test_legalize_batch_matmul((16, 8, 32), (16, 32, 32), (0, 0, 0), "int4", False)
_test_legalize_batch_matmul((16, 7, 32), (16, 32, 32), (1, 0, 0), "int4")
_test_legalize_batch_matmul((16, 8, 31), (16, 32, 31), (0, 1, 0), "int4")
_test_legalize_batch_matmul((16, 8, 32), (16, 31, 32), (0, 0, 1), "int4")
_test_legalize_batch_matmul((16, 7, 31), (16, 31, 31), (1, 1, 1), "int4")
_test_legalize_batch_matmul((16, 3, 32), (16, 32, 32), (5, 0, 0), "int4")
_test_legalize_batch_matmul((16, 8, 16), (16, 32, 16), (0, 16, 0), "int4")
_test_legalize_batch_matmul((16, 2, 16), (16, 32, 16), (0, 0, 0), "int4", False)
if __name__ == "__main__":
test_legalize_conv2d_NHWC()
test_legalize_conv2d_HWNC()
test_legalize_dense()
test_legalize_batch_matmul()


"""Unit tests for MAC counter."""
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import analysis, transform
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = tvm.relay.transform.InferType()(mod)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_gemm():
n = 512
k = 1024
m = 256
dshape1 = (n, k)
dshape2 = (m, k)
data1 = relay.var("data1", shape=dshape1)
data2 = relay.var("data2", shape=dshape2)
gemm = relay.nn.dense(data1, data2)
func = relay.Function([data1, data2], relay.Tuple(tvm.runtime.convert([gemm])))
func = run_opt_pass(func, transform.InferType())
compute_count = analysis.get_total_mac_number(func)
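# dense(data1: (n, k), data2: (m, k)) yields an (n, m) result and each output element
# accumulates k multiply-adds, so the expected MAC count is n * m * k.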
expect_count = n * m * k
assert compute_count == expect_count
def test_conv():
batch_size = 1
input_channel = 3
h = 224
w = 224
output_channel = 64
kh = 7
kw = 7
h_padding = 1
w_padding = 1
oh = h + h_padding * 2 - kh + 1
ow = w + w_padding * 2 - kw + 1
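# Stride-1 convolution output size: oh = h + 2 * padding - kh + 1 (likewise for ow);
# every output element of every output channel performs kh * kw * input_channel MACs.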
dshape = (batch_size, input_channel, h, w)
weight = relay.var("weight", shape=(output_channel, input_channel, kh, kw))
data = relay.var("data", shape=dshape)
conv2d = relay.nn.conv2d(
data, weight, channels=output_channel, kernel_size=(kh, kw), padding=(h_padding, w_padding)
)
func = relay.Function([data, weight], relay.Tuple(tvm.runtime.convert([conv2d])))
func = run_opt_pass(func, transform.InferType())
compute_count = analysis.get_total_mac_number(func)
expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw
assert compute_count == expect_count
def test_simple_network():
batch_size = 1
dshape = (batch_size, 64, 56, 56)
weight_conv = relay.var("weight_conv", shape=(64, 64, 3, 3))
data1 = relay.var("data1", shape=dshape)
data2 = relay.var("data2", shape=dshape)
weight_dense = relay.var("weight_dense", shape=(1, 56 * 56 * 64))
conv2d_1 = relay.nn.conv2d(data1, weight_conv, channels=64, kernel_size=(3, 3), padding=(1, 1))
conv2d_2 = relay.nn.conv2d(data2, weight_conv, channels=64, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d_1, conv2d_2)
flattened = relay.nn.batch_flatten(add)
dense_1 = relay.nn.dense(flattened, weight_dense)
func = relay.Function(
[data1, data2, weight_conv, weight_dense],
relay.Tuple(tvm.runtime.convert([conv2d_1, conv2d_2, dense_1, add, flattened])),
)
func = run_opt_pass(func, transform.AlterOpLayout())
compute_count = analysis.get_total_mac_number(func)
expect_count = 231411712
assert compute_count == expect_count
def test_depthwise_conv2d():
batch_size = 1
dshape = (batch_size, 64, 56, 56)
weight_conv = relay.var("weight_depthwiseconv", shape=(64, 1, 3, 3))
data1 = relay.var("data1", shape=dshape)
data2 = relay.var("data2", shape=dshape)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight_conv, kernel_size=(3, 3), padding=(1, 1), groups=64
)
depthwise_conv2d_2 = relay.nn.conv2d(
data2, weight_conv, kernel_size=(3, 3), padding=(1, 1), groups=64
)
add = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
func = relay.Function(
[data1, data2, weight_conv],
relay.Tuple(tvm.runtime.convert([depthwise_conv2d_1, depthwise_conv2d_2, add])),
)
func = run_opt_pass(func, transform.InferType())
compute_count = analysis.get_total_mac_number(func)
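# With groups == channels, each depthwise output element needs only kh * kw (= 3 * 3)
# MACs, so each conv contributes prod(dshape) * 9 and the two convs give the factor of 2.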
assert compute_count == 2 * np.prod(dshape) * 3 * 3
def test_conv_2d_transpose():
batch_size = 1
input_channel = 3
h = 224
w = 224
output_channel = 64
kh = 7
kw = 7
h_padding = 1
w_padding = 1
oh = h - h_padding * 2 + kh - 1
ow = w - w_padding * 2 + kw - 1
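# For a stride-1 conv2d_transpose the spatial size is mirrored: oh = h - 2 * padding + kh - 1;
# the MAC count formula below is otherwise the same as for the forward convolution.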
dshape = (batch_size, input_channel, h, w)
weight = relay.var("weight", shape=(input_channel, output_channel, kh, kw))
data = relay.var("data", shape=dshape)
conv2d_transpose = relay.nn.conv2d_transpose(
data, weight, channels=output_channel, kernel_size=(kh, kw), padding=(h_padding, w_padding)
)
func = relay.Function([data, weight], relay.Tuple(tvm.runtime.convert([conv2d_transpose])))
func = run_opt_pass(func, transform.InferType())
compute_count = analysis.get_total_mac_number(func)
expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw
assert compute_count == expect_count
if __name__ == "__main__":
test_conv()
test_gemm()
test_simple_network()
test_depthwise_conv2d()
test_conv_2d_transpose()


"""Unit tests for relay pass manager."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import ExprFunctor
from tvm.relay import Function, Call
from tvm.relay import analysis
from tvm.relay import transform as _transform
from tvm.ir import instrument as _instrument
from tvm.relay.testing import run_infer_type
import tvm.testing
def get_var_func():
shape = (5, 10)
tp = relay.TensorType(shape, "float32")
x = relay.var("x", tp)
gv = relay.GlobalVar("myAbs")
func = relay.Function([x], relay.abs(x))
return gv, func
def extract_var_func(mod, name):
var = mod.get_global_var(name)
func = mod[var]
return var, func
def update_func(func):
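# DoubleValues rewrites every variable and constant x into x + x; the pass-manager
# tests below check for exactly this doubling (see get_ref_log / get_ref_sub / get_ref_abs).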
class DoubleValues(ExprFunctor):
def __init__(self):
ExprFunctor.__init__(self)
def visit_constant(self, const):
return relay.add(const, const)
def visit_var(self, var):
return relay.add(var, var)
def visit_call(self, call):
new_op = self.visit(call.op)
new_args = [self.visit(arg) for arg in call.args]
return Call(new_op, new_args, call.attrs)
def visit_global_var(self, gvar):
return gvar
def visit_op(self, op):
return op
def visit_function(self, fn):
new_body = self.visit(fn.body)
return Function(list(fn.params), new_body, fn.ret_type, fn.type_params, fn.attrs)
double_value = DoubleValues()
return double_value.visit(func)
class OptTester:
"""A helper class for testing the pass manager."""
def __init__(self, mod):
if not isinstance(mod, tvm.IRModule):
raise TypeError("mod is expected to be the type of " "tvm.IRModule")
self.mod = mod
def analysis(self):
"""Perform analysis for the current module."""
pass
@staticmethod
def transform(node, ctx=None):
"""Perform optimization on node."""
if isinstance(node, tvm.IRModule):
gv, func = get_var_func()
mod = tvm.IRModule({gv: func})
mod.update(node)
return mod
if isinstance(node, relay.Function):
return update_func(node)
raise TypeError("Found not supported node type.")
def get_rand(shape, dtype="float32"):
return tvm.nd.array(np.random.rand(*shape).astype(dtype))
def check_func(func, ref_func):
func = run_infer_type(func)
ref_func = run_infer_type(ref_func)
assert tvm.ir.structural_equal(func, ref_func)
@tvm.testing.uses_gpu
def test_module_pass():
shape = (5, 10)
dtype = "float32"
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
y = relay.var("y", tp)
v_add = relay.GlobalVar("myAdd")
func = relay.Function([x, y], x + y)
mod = tvm.IRModule({v_add: func})
pass_name = "module_pass_test"
opt_level = 0
opt_tester = OptTester(mod)
pass_ctx = None
@tvm.transform.module_pass(opt_level=opt_level, name=pass_name)
def transform(expr, ctx):
return opt_tester.transform(expr, ctx)
def test_pass_registration():
mod_pass = transform
assert isinstance(mod_pass, tvm.transform.ModulePass)
pass_info = mod_pass.info
assert pass_info.name == pass_name
assert pass_info.opt_level == opt_level
def test_pass_registration_no_decorator():
def direct_transform(expr, ctx):
return opt_tester.transform(expr, ctx)
mod_pass = tvm.transform.module_pass(direct_transform, opt_level=3)
assert isinstance(mod_pass, tvm.transform.ModulePass)
pass_info = mod_pass.info
assert pass_info.name == "direct_transform"
assert pass_info.opt_level == 3
def test_pass_run():
module_pass = transform
assert pass_name in str(module_pass)
updated_mod = module_pass(mod)
assert isinstance(updated_mod, tvm.IRModule)
v_abs, myabs = get_var_func()
new_v_add = updated_mod.get_global_var(v_abs.name_hint)
new_abs = updated_mod[new_v_add]
check_func(new_abs, myabs)
v_abs, myabs = get_var_func()
new_v_add = updated_mod.get_global_var(v_add.name_hint)
new_add = updated_mod[new_v_add]
check_func(new_add, func)
ret = opt_tester.transform(mod, pass_ctx)
transformed_v_add = ret.get_global_var(v_add.name_hint)
transformed_add = mod[transformed_v_add]
check_func(new_add, transformed_add)
x_nd = get_rand(shape, dtype)
y_nd = get_rand(shape, dtype)
ref_res = x_nd.numpy() + y_nd.numpy()
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_add)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_add)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
test_pass_registration()
test_pass_registration_no_decorator()
test_pass_run()
def test_function_class_pass():
@relay.transform.function_pass(opt_level=1)
class TestReplaceFunc:
"""Simple test function to replace one argument to another."""
def __init__(self, new_func):
self.new_func = new_func
def transform_function(self, func, mod, ctx):
return self.new_func
x = relay.var("x", shape=(10, 20))
f1 = relay.Function([x], x)
f2 = relay.Function([x], relay.log(x))
fpass = TestReplaceFunc(f1)
assert fpass.info.opt_level == 1
assert fpass.info.name == "TestReplaceFunc"
mod = tvm.IRModule.from_expr(f2)
mod = fpass(mod)
mod2 = tvm.IRModule.from_expr(f1)
mod2 = tvm.relay.transform.InferType()(mod2)
assert tvm.ir.structural_equal(mod["main"], mod2["main"])
@tvm.testing.uses_gpu
def test_function_pass():
shape = (10,)
dtype = "float32"
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
v_log = relay.GlobalVar("myLog")
log = relay.Function([x], relay.log(x))
mod = tvm.IRModule({v_log: log})
pass_name = "function_pass_test"
opt_level = 1
opt_tester = OptTester(mod)
pass_ctx = None
@_transform.function_pass(opt_level=opt_level, name=pass_name)
def transform(expr, mod, ctx):
return opt_tester.transform(expr, ctx)
def get_ref_log():
ref_log = relay.Function([x], relay.log(relay.add(x, x)))
return ref_log
def test_pass_registration():
function_pass = transform
assert isinstance(function_pass, _transform.FunctionPass)
pass_info = function_pass.info
assert pass_info.name == pass_name
assert pass_info.opt_level == opt_level
def test_pass_registration_no_decorator():
def direct_transform(expr, ctx):
return opt_tester.transform(expr, ctx)
mod_pass = _transform.function_pass(direct_transform, opt_level=0)
assert isinstance(mod_pass, _transform.FunctionPass)
pass_info = mod_pass.info
assert pass_info.name == "direct_transform"
assert pass_info.opt_level == 0
def test_pass_run():
function_pass = transform
assert pass_name in str(function_pass)
updated_mod = function_pass(mod)
assert isinstance(updated_mod, tvm.IRModule)
new_v_log = updated_mod.get_global_var(v_log.name_hint)
new_log = updated_mod[new_v_log]
check_func(new_log, get_ref_log())
ret = opt_tester.transform(log, pass_ctx)
check_func(new_log, ret)
x_nd = get_rand(shape, dtype)
ref_res = np.log(x_nd.numpy() * 2)
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_log)(x_nd)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_log)(x_nd)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
test_pass_registration()
test_pass_registration_no_decorator()
test_pass_run()
def test_module_class_pass():
@tvm.transform.module_pass(opt_level=1)
class TestPipeline:
"""Simple test function to replace one argument to another."""
def __init__(self, new_mod, replace):
self.new_mod = new_mod
self.replace = replace
def transform_module(self, mod, ctx):
if self.replace:
return self.new_mod
return mod
x = relay.var("x", shape=(10, 20))
m1 = tvm.IRModule.from_expr(relay.Function([x], x))
m2 = tvm.IRModule.from_expr(relay.Function([x], relay.log(x)))
fpass = TestPipeline(m2, replace=True)
assert fpass.info.name == "TestPipeline"
mod3 = fpass(m1)
assert mod3.same_as(m2)
mod4 = TestPipeline(m2, replace=False)(m1)
assert mod4.same_as(m1)
def test_pass_info():
info = tvm.transform.PassInfo(opt_level=1, name="xyz")
assert info.opt_level == 1
assert info.name == "xyz"
@tvm.testing.uses_gpu
def test_sequential_pass():
shape = (10,)
dtype = "float32"
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
y = relay.var("y", tp)
v_sub = relay.GlobalVar("mySub")
sub = relay.Function([x, y], relay.subtract(x, y))
z = relay.var("z", tp)
v_log = relay.GlobalVar("myLog")
log = relay.Function([z], relay.log(z))
mod = tvm.IRModule({v_sub: sub, v_log: log})
def get_ref_log():
ref_log = relay.Function([x], relay.log(relay.add(x, x)))
return ref_log
def get_ref_sub():
ref_sub = relay.Function([x, y], relay.subtract(relay.add(x, x), relay.add(y, y)))
return ref_sub
def get_ref_abs():
shape = (5, 10)
tp = relay.TensorType(shape, "float32")
a = relay.var("a", tp)
ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
return ref_abs
opt_tester = OptTester(mod)
pass_ctx = None
@tvm.transform.module_pass(opt_level=1)
def mod_transform(expr, ctx):
return opt_tester.transform(expr, ctx)
module_pass = mod_transform
@_transform.function_pass(opt_level=1)
def func_transform(expr, mod, ctx):
return opt_tester.transform(expr, ctx)
function_pass = func_transform
def test_pass_registration():
passes = [module_pass, function_pass]
opt_level = 2
pass_name = "sequential"
sequential = tvm.transform.Sequential(passes=passes, opt_level=opt_level)
pass_info = sequential.info
assert pass_info.name == pass_name
assert pass_info.opt_level == opt_level
def test_no_pass():
passes = []
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
ret_mod = sequential(mod)
mod_func = ret_mod[v_sub]
check_func(sub, mod_func)
def test_only_module_pass():
passes = [module_pass]
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
with tvm.transform.PassContext(required_pass=["mod_transform"]):
ret_mod = sequential(mod)
sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
check_func(new_sub, sub)
abs_var, abs_func = get_var_func()
abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
check_func(new_abs, abs_func)
def test_only_function_pass():
passes = [function_pass]
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
with tvm.transform.PassContext(required_pass=["func_transform"]):
ret_mod = sequential(mod)
_, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
check_func(new_sub, get_ref_sub())
log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
check_func(new_log, get_ref_log())
def test_multiple_passes():
mod = tvm.IRModule({v_sub: sub, v_log: log})
passes = [module_pass, function_pass]
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
required = ["mod_transform", "func_transform"]
with tvm.transform.PassContext(required_pass=required):
ret_mod = sequential(mod)
abs_var, abs_func = get_var_func()
abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
check_func(new_abs, get_ref_abs())
_, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
check_func(new_sub, get_ref_sub())
_, new_log = extract_var_func(ret_mod, v_log.name_hint)
check_func(new_log, get_ref_log())
x_nd = get_rand(shape, dtype)
y_nd = get_rand(shape, dtype)
ref_res = np.subtract(x_nd.numpy() * 2, y_nd.numpy() * 2)
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_sub)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_sub)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
x_nd = get_rand((5, 10), dtype)
ref_res = np.abs(x_nd.numpy() * 2)
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_abs)(x_nd)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_abs)(x_nd)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
test_pass_registration()
test_no_pass()
test_only_module_pass()
test_only_function_pass()
test_multiple_passes()
def test_sequential_with_scoping():
shape = (1, 2, 3)
c_data = np.array(shape).astype("float32")
tp = relay.TensorType(shape, "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", tp)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(x, y)
z = relay.add(y, c)
z1 = relay.add(y, c)
z2 = relay.add(z, z1)
return relay.Function([x], z2)
def expected():
x = relay.var("x", tp)
c_folded = (c_data + c_data) * 2
y = relay.add(x, relay.const(c_folded))
z = relay.add(y, relay.const(c_data))
z1 = relay.add(z, z)
return relay.Function([x], z1)
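# FoldConstant collapses (c + c) * 2 into a single constant and EliminateCommonSubexpr
# merges the duplicated add(y, c) bindings, which is what expected() above encodes.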
seq = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.AlterOpLayout(),
]
)
mod = tvm.IRModule({"main": before()})
with tvm.transform.PassContext(opt_level=3):
with tvm.target.Target("llvm"):
mod = seq(mod)
zz = mod["main"]
zexpected = run_infer_type(expected())
assert tvm.ir.structural_equal(zz, zexpected)
def test_nested_sequential_with_scoping():
def before():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(1, 16, -1))
y = relay.reshape(y, newshape=(4, 8, -1, 16))
y = relay.reverse_reshape(y, newshape=(32, 0, -1))
return tvm.IRModule.from_expr(y)
def expected():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(32, 16, 16))
return tvm.IRModule.from_expr(y)
z = before()
passes = [
tvm.transform.Sequential([relay.transform.SimplifyExpr()]),
]
with tvm.transform.PassContext(opt_level=1):
zz = tvm.transform.Sequential(passes)(z)
expected = relay.transform.InferType()(expected())
assert tvm.ir.structural_equal(zz, expected)
def test_print_ir(capfd):
shape = (1, 2, 3)
tp = relay.TensorType(shape, "float32")
x = relay.var("x", tp)
y = relay.add(x, x)
y = relay.multiply(y, relay.const(2, "float32"))
func = relay.Function([x], y)
seq = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
tvm.transform.PrintIR(),
relay.transform.DeadCodeElimination(),
]
)
mod = tvm.IRModule({"main": func} |
)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
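# PrintIR writes the module to stderr while the Sequential runs, so the pass header and
# the (folded) multiply should show up in the stderr captured by pytest's capfd fixture.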
out = capfd.readouterr().err
assert "PrintIR" in out
assert "multiply" in out
@tvm.instrument.pass_instrument
class PassCounter:
def __init__(self):
self.counts = 1234
def enter_pass_ctx(self):
self.counts = 0
def exit_pass_ctx(self):
self.counts = 0
def run_before_pass(self, module, info):
self.counts += 1
def get_counts(self):
return self.counts
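# enter_pass_ctx / exit_pass_ctx fire when the instrumented PassContext is entered and
# left, and run_before_pass fires once per executed pass, so counts reflects how many
# passes actually ran inside the context (and resets to 0 outside it).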
def test_print_debug_callback():
shape = (1, 2, 3)
tp = relay.TensorType(shape, "float32")
x = relay.var("x", tp)
y = relay.add(x, x)
y = relay.multiply(y, relay.const(2, "float32"))
func = relay.Function([x], y)
seq = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
relay.transform.DeadCodeElimination(),
]
)
mod = tvm.IRModule({"main": func})
pass_counter = PassCounter()
with tvm.transform.PassContext(opt_level=3, instruments=[pass_counter]):
assert pass_counter.get_counts() == 0
mod = seq(mod)
assert pass_counter.get_counts() == 6
assert pass_counter.get_counts() == 0
if __name__ == "__main__":
pytest.main()


import tvm
import tvm.testing
from tvm.relay import Function, transform
from tvm.relay.testing import inception_v3
import pytest
import sys
def optimize_and_check(before_program, after_program, passes):
if isinstance(before_program, str):
before_program = tvm.parser.parse(before_program)
if isinstance(after_program, str):
after_program = tvm.parser.parse(after_program)
if not isinstance(passes, list):
passes = [passes]
optimize = tvm.transform.Sequential(passes)
optimized_program = optimize(before_program)
print("Actual:")
print(optimized_program)
print("Expected:")
print(after_program)
assert tvm.ir.structural_equal(optimized_program, after_program, map_free_vars=True)
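# ManifestLifetimes inserts an explicit `let %_ = memory.kill(...)` binding after the last
# use of each let-bound value, as the expected programs in the tests below spell out.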
def test_simple_linear():
before_program = """
def @main(%x: int) {
let %y = %x + %x;
let %z = %y + %y;
let %w = %z + %z;
%w
}
"""
after_program = """
def @main(%x: int) {
let %y = %x + %x;
let %_0 = memory.kill(%x);
let %z = %y + %y;
let %_1 = memory.kill(%y);
let %w = %z + %z;
let %_2 = memory.kill(%z);
%w
}
"""
optimize_and_check(before_program, after_program, transform.ManifestLifetimes())
def test_simple_if():
before_program = """
def @main(%x: int) {
let %y = cast(%x, dtype="bool");
let %z = if (%y) {
let %v0 = %x + %x;
let %v1 = %v0 * 2;
%v1
} else {
%x
};
%z
}
"""
after_program = """
def @main(%x: int) {
let %y = cast(%x, dtype="bool");
let %z = if (%y) {
let %v0 = %x + %x;
let %_0 = memory.kill(%x);
let %v1 = %v0 * 2;
let %_1 = memory.kill(%v0);
%v1
} else {
%x
};
let %_1 = memory.kill(%y);
%z
}
"""
optimize_and_check(before_program, after_program, transform.ManifestLifetimes())
def test_simple_match():
before_program = """
type List[A] {
Cons(A, List[A]),
Nil,
}
def @main(%x: int) {
let %l : List[int] = Nil;
let %m = (match (%l) {
Cons(%head, %rest) => {
let %y = %x + 1;
let %z = %y + %y;
%z
},
Nil => -1,
});
%m
}
"""
after_program = """
type List[A] {
Cons(A, List[A]),
Nil,
}
def @main(%x: int) {
let %l : List[int] = Nil;
let %m = (match (%l) {
Cons(%head, %rest) => {
let %y = %x + 1;
let %_0 = memory.kill(%x);
let %z = %y + %y;
let %_1 = memory.kill(%y);
/* TODO: %head and %rest should be immediately killed */
%z
},
Nil => -1
});
let %_2 = memory.kill(%l);
%m
}
"""
optimize_and_check(before_program, after_program, transform.ManifestLifetimes())
if __name__ == "__main__":
tvm.testing.main()


"""Unit tests for merge compiler regions."""
import tvm
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.testing import run_opt_pass
def test_diamond_graph_fanouts():
"""
This tests that the data dependencies present in a diamond-shaped
graph are correctly resolved by the merging pass.
O = supported by target
X = not supported by target
O O
/ \\ / \\
O X --> O + + X
\\ / \\ /
O O
Note that we can't simply merge the three supported operators into one region,
otherwise each of the two resulting subgraphs would depend on the other.
"""
def diamond_graph_fanouts():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test")
O_1 = relay.abs(cb_1)
ce_1 = compiler_end(O_1, "test")
ce_2 = compiler_end(O_1, "test")
cb_2 = compiler_begin(ce_1, "test")
cb_3 = compiler_begin(ce_2, "default")
O_2 = relay.nn.relu(cb_2)
ce_3 = compiler_end(O_2, "test")
X = relay.tanh(cb_3)
ce_4 = compiler_end(X, "default")
cb_4 = compiler_begin(ce_3, "test")
cb_5 = compiler_begin(ce_4, "test")
O_3 = relay.add(cb_4, cb_5)
ce_5 = compiler_end(O_3, "test")
diamond = relay.Function([data], ce_5)
return diamond
def expected():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test")
O_1 = relay.abs(cb_1)
ce_2 = compiler_end(O_1, "test")
O_2 = relay.nn.relu(O_1)
ce_3 = compiler_end(O_2, "test")
cb_3 = compiler_begin(ce_2, "default")
X = relay.tanh(cb_3)
ce_4 = compiler_end(X, "default")
cb_4 = compiler_begin(ce_3, "test")
cb_5 = compiler_begin(ce_4, "test")
O_3 = relay.add(cb_4, cb_5)
ce_5 = compiler_end(O_3, "test")
func = relay.Function([data], ce_5)
return func
result = run_opt_pass(diamond_graph_fanouts(), relay.transform.MergeCompilerRegions())
golden = run_opt_pass(expected(), relay.transform.InferType())
assert tvm.ir.structural_equal(result, golden)
def test_example_graph():
"""This tests the merging algorithm on the example used in the RFC.
See the RFC here: https:
Blue nodes are adds (target: test), red nodes are subtracts (target: default).
"""
def annotated():
in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")
begin0 = compiler_begin(in_1, "test")
begin1 = compiler_begin(in_2, "test")
begin2 = compiler_begin(in_3, "test")
begin3 = compiler_begin(in_4, "test")
node0 = relay.add(begin0, begin1)
node1 = relay.add(begin2, begin3)
end0 = compiler_end(node0, "test")
end1 = compiler_end(node1, "test")
begin4 = compiler_begin(end0, "test")
begin5 = compiler_begin(end1, "test")
node2 = relay.add(begin4, begin5)
end2 = compiler_end(node2, "test")
dbegin0 = compiler_begin(in_5, "default")
dbegin1 = compiler_begin(in_6, "default")
node3 = relay.subtract(dbegin0, dbegin1)
dbegin2 = compiler_begin(in_7, "default")
dend1 = compiler_end(node3, "default")
dbegin3 = compiler_begin(dend1, "default")
node4 = relay.subtract(dbegin2, dbegin3)
dend2 = compiler_end(node4, "default")
begin6 = compiler_begin(end2, "test")
begin7 = compiler_begin(dend2, "test")
node5 = relay.add(begin6, begin7)
end3 = compiler_end(node5, "test")
end4 = compiler_end(node5, "test")
dbegin4 = compiler_begin(in_8, "default")
dbegin5 = compiler_begin(end3, "default")
node6 = relay.subtract(dbegin4, dbegin5)
begin8 = compiler_begin(in_9, "test")
begin9 = compiler_begin(end4, "test")
node7 = relay.add(begin8, begin9)
end5 = compiler_end(node7, "test")
dend3 = compiler_end(node6, "default")
begin10 = compiler_begin(dend3, "test")
begin11 = compiler_begin(end5, "test")
node8 = relay.add(begin10, begin11)
end6 = compiler_end(node8, "test")
begin12 = compiler_begin(in_10, "test")
begin13 = compiler_begin(end6, "test")
node9 = relay.add(begin12, begin13)
end7 = compiler_end(node9, "test")
f = relay.Function([in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end7)
mod = tvm.IRModule.from_expr(f)
return mod
def expected():
in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")
begin0 = compiler_begin(in_1, "test")
begin1 = compiler_begin(in_2, "test")
begin2 = compiler_begin(in_3, "test")
begin3 = compiler_begin(in_4, "test")
node0 = relay.add(begin0, begin1)
node1 = relay.add(begin2, begin3)
node2 = relay.add(node0, node1)
dbegin0 = compiler_begin(in_5, "default")
dbegin1 = compiler_begin(in_6, "default")
dbegin2 = compiler_begin(in_7, "default")
node3 = relay.subtract(dbegin0, dbegin1)
node4 = relay.subtract(dbegin2, node3)
dend0 = compiler_end(node4, "default")
begin4 = compiler_begin(dend0, "test")
begin5 = compiler_begin(in_9, "test")
node5 = relay.add(node2, begin4)
end1 = compiler_end(node5, "test")
dbegin4 = compiler_begin(end1, "default")
dbegin5 = compiler_begin(in_8, "default")
node6 = relay.subtract(dbegin5, dbegin4)
dend1 = compiler_end(node6, "default")
node7 = relay.add(begin5, node5)
end2 = compiler_end(node7, "test")
begin6 = compiler_begin(end2, "test")
begin7 = compiler_begin(dend1, "test")
node8 = relay.add(begin7, begin6)
begin8 = compiler_begin(in_10, "test")
node9 = relay.add(begin8, node8)
end3 = compiler_end(node9, "test")
f = relay.Function([in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end3)
mod = tvm.IRModule.from_expr(f)
return mod
mod = annotated()
mod = relay.transform.MergeCompilerRegions()(mod)
mod = relay.transform.InferType()(mod)
ref_mod = expected()
ref_mod = relay.transform.InferType()(ref_mod)
assert tvm.ir.structural_equal(mod, ref_mod)
if __name__ == "__main__":
test_diamond_graph_fanouts()
test_example_graph()


"""Unit tests for merge composite."""
import pytest
import tvm
from tvm import relay, tir
from tvm.relay.dataflow_pattern import TupleGetItemPattern, is_op, wildcard
from tvm.relay.testing import run_opt_pass
"""
The merge composite pass is designed to merge multiple Relay operators that
match a given pattern and combine them into a single Relay function.
For example, suppose we have the graph:
conv2d
| (merge composite pass)
bias_add ====> conv2d_bias_relu
| (our target)
relu
Our Relay IR before the pass:
fn (%data: Tensor[(1, 512, 28, 28), float32], %kernel: Tensor[(256, 512, 1, 1), float32],
%bias: Tensor[(256), float32]) -> Tensor[(1, 256, 28, 28), float32] {
%0 = nn.conv2d(%data, %kernel, kernel_size=[1, 1])
/* ty=Tensor[(1, 256, 28, 28), float32] */;
%1 = nn.bias_add(%0, %bias) /* ty=Tensor[(1, 256, 28, 28), float32] */;
nn.relu(%1) /* ty=Tensor[(1, 256, 28, 28), float32] */
}
Our Relay IR after the pass:
fn (%data: Tensor[(1, 512, 28, 28), float32], %kernel: Tensor[(256, 512, 1, 1), float32],
%bias: Tensor[(256), float32]) -> Tensor[(1, 256, 28, 28), float32] {
%2 = fn (%x: Tensor[(1, 512, 28, 28), float32], %y: Tensor[(256, 512, 1, 1), float32],
%z: Tensor[(256), float32], Primitive=1, Composite="conv2d_bias_relu") ->
Tensor[(1, 256, 28, 28), float32] {
%0 = nn.conv2d(%x, %y, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%1 = nn.bias_add(%0, %z) /* ty=Tensor[(1, 256, 28, 28), float32] */;
nn.relu(%1) /* ty=Tensor[(1, 256, 28, 28), float32] */
};
%2(%data, %kernel, %bias) /* ty=Tensor[(1, 256, 28, 28), float32] */
}
As you can see in the second relay example, the pattern we specified has been wrapped
in a function. The function is then called, producing the same result as the first relay
example.
One convenient use for this pass is to offload multiple operators to a single external
codegen function.
"""
def make_add_sub_mul_pattern():
r"""Create a pattern to match the following graph.
add sub
\ /
\ /
mul
"""
x = wildcard()
y = wildcard()
return (x + y) * (x - y)
def make_add_relu_pattern():
r"""Create a pattern to match the following graph.
add
|
relu
"""
add_node = wildcard() + wildcard()
r = is_op("nn.relu")(add_node)
return r
def make_conv_bias_relu_pattern():
r"""Create a pattern to match the following graph.
conv2d
|
bias_add
|
relu
"""
x = wildcard()
y = wildcard()
z = wildcard()
conv_node = is_op("nn.conv2d")(x, y)
bias_node = is_op("nn.bias_add")(conv_node, z)
r = is_op("nn.relu")(bias_node)
return r
def make_pattern_with_optional():
r"""Create a pattern to match the following graph. Note that relu is optinal.
conv2d
|
bias_add
|
(relu)
"""
x = wildcard()
y = wildcard()
z = wildcard()
conv_node = is_op("nn.conv2d")(x, y)
bias_node = is_op("nn.bias_add")(conv_node, z)
r = bias_node.optional(lambda x: is_op("nn.relu")(x))
return r
def make_add_add_add_pattern():
r"""Create a pattern to match the following graph.
Useful for testing re-using a call node.
x y
/ \ /
| add
\ | \
add |
| /
add
"""
x = wildcard()
y = wildcard()
add_node = is_op("add")(x, y)
add_node_1 = is_op("add")(x, add_node)
r = is_op("add")(add_node_1, add_node)
return r
def make_bn_relu_pattern():
r"""Create a pattern to match the following graph.
batch_norm
|
TupleGetItem(0)
|
relu
"""
x = wildcard()
gamma = wildcard()
beta = wildcard()
moving_mean = wildcard()
moving_var = wildcard()
bn_node = is_op("nn.batch_norm")(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = TupleGetItemPattern(bn_node, 0)
r = is_op("nn.relu")(tuple_get_item_node)
return r
def check_result(pattern_table, graph, expected_graph, import_prelude=False):
"""Utility function to check merge composite results.""" |
result = run_opt_pass(
graph, relay.transform.MergeComposite(pattern_table), import_prelude=import_prelude
)
assert not relay.analysis.free_vars(result), "Found free vars in the result graph: {0}".format(
str(result)
)
expected = run_opt_pass(expected_graph, relay.transform.InferType())
assert tvm.ir.structural_equal(
result, expected, map_free_vars=True
), "Graph mismatch: output vs. expected\n{0}\n=====\n{1}".format(str(result), str(expected))
def test_simple_merge():
r"""Test composite function is correctly produced from simple graph.
We could expect the pattern `make_add_relu_pattern` to be merged
into a single op `add_relu`.
a b
\ / a b
add ====> \ /
| add_relu
relu
"""
pattern_table = [("add_relu", make_add_relu_pattern())]
def before():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
add_node = relay.add(a, b)
r = relay.nn.relu(add_node)
return relay.Function([a, b], r)
def expected():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
in_1 = relay.var("in_1", shape=(10, 10))
in_2 = relay.var("in_2", shape=(10, 10))
add_node = relay.add(in_1, in_2)
relu_node = relay.nn.relu(add_node)
add_relu = relay.Function([in_1, in_2], relu_node)
add_relu = add_relu.with_attr("Composite", "add_relu")
add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")
r = relay.Call(add_relu, [a, b])
return relay.Function([a, b], r)
check_result(pattern_table, before(), expected())
def test_branch_merge():
r"""Test composite function is correctly produced from branching graph.
We would expect the pattern `make_add_sub_mul_pattern` to be merged
into a single op `add_sub_mul`.
a b a b
\/ \/
add sub a b |
\ / \/
\ / add_sub_mul
mul c |
/ \ \ |
c / c | ====> add_sub_mul
\/ \/ |
add sub |
\ / relu
\ /
mul
|
|
relu
"""
pattern_table = [("add_sub_mul", make_add_sub_mul_pattern())]
def before():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
c = relay.var("c", shape=(10, 10))
add_node = relay.add(a, b)
sub_node = relay.subtract(a, b)
mul_node = relay.multiply(add_node, sub_node)
add_node_2 = relay.add(c, mul_node)
sub_node_2 = relay.subtract(c, mul_node)
mul_node_2 = relay.multiply(add_node_2, sub_node_2)
r = relay.nn.relu(mul_node_2)
return relay.Function([a, b, c], r)
def expected():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
c = relay.var("c", shape=(10, 10))
in_1 = relay.var("in_1", shape=(10, 10))
in_2 = relay.var("in_2", shape=(10, 10))
add_node = relay.add(in_1, in_2)
sub_node = relay.subtract(in_1, in_2)
mul_node = relay.multiply(add_node, sub_node)
add_sub_mul = relay.Function([in_1, in_2], mul_node)
add_sub_mul = add_sub_mul.with_attr("Composite", "add_sub_mul")
add_sub_mul = add_sub_mul.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
in_3 = relay.var("in_3", shape=(10, 10))
in_4 = relay.var("in_4", shape=(10, 10))
add_node_1 = relay.add(in_3, in_4)
sub_node_1 = relay.subtract(in_3, in_4)
mul_node_1 = relay.multiply(add_node_1, sub_node_1)
add_sub_mul_1 = relay.Function([in_3, in_4], mul_node_1)
add_sub_mul_1 = add_sub_mul_1.with_attr("Composite", "add_sub_mul")
add_sub_mul_1 = add_sub_mul_1.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
m_add_sub_mul_1 = relay.Call(add_sub_mul, [a, b])
m_add_sub_mul_2 = relay.Call(add_sub_mul_1, [c, m_add_sub_mul_1])
r = relay.nn.relu(m_add_sub_mul_2)
return relay.Function([a, b, c], r)
check_result(pattern_table, before(), expected())
def test_reuse_call_merge():
r"""Test composite function is correctly produced from simple graph
which re-uses call nodes.
We could expect the pattern `make_add_add_add` to be merged
into a single op `add_add_add`.
x y
\ / \
sub | x y
/ | / \ / |
| add ====> sub |
\ | \ | /
add | add_add_add
| /
add
"""
pattern_table = [("add_add_add", make_add_add_add_pattern())]
def before():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
sub_node = relay.subtract(a, b)
add_node = relay.add(sub_node, b)
add_node_1 = relay.add(sub_node, add_node)
r = relay.add(add_node_1, add_node)
return relay.Function([a, b], r)
def expected():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
in_1 = relay.var("in_1", shape=(10, 10))
in_2 = relay.var("in_2", shape=(10, 10))
add_node = relay.add(in_1, in_2)
add_node_1 = relay.add(in_1, add_node)
add_node_2 = relay.add(add_node_1, add_node)
add_add_add = relay.Function([in_1, in_2], add_node_2)
add_add_add = add_add_add.with_attr("Composite", "add_add_add")
add_add_add = add_add_add.with_attr("PartitionedFromPattern", "add_add_add_")
sub_node = relay.subtract(a, b)
call = relay.Call(add_add_add, [sub_node, b])
return relay.Function([a, b], call)
check_result(pattern_table, before(), expected())
def test_multiple_patterns():
r"""Test different patterns are merged correctly in the graph.
We would expect the pattern `make_conv_bias_relu_pattern` to be merged
into a single op `conv_bias_relu`. We would also expect `make_add_relu_pattern`
to be merged into a single op `add_relu`.
data kernel
\ /
\ /
conv2d data kernel bias
| \ | /
| bias conv2d_bias_relu
| / |
bias_add ====> | a
| | /
relu a add_relu
\ / |
add | b
| | /
relu b mul
| /
mul
"""
pattern_table = [
("conv2d_bias_relu", make_conv_bias_relu_pattern()),
("add_relu", make_add_relu_pattern()),
]
def before():
data = relay.var("data", shape=(1, 512, 28, 28))
kernel = relay.var("kernel", shape=(256, 512, 1, 1))
bias = relay.var("bias", shape=(256,))
a = relay.var("a", shape=(1, 256, 28, 28))
b = relay.var("b", shape=(1, 256, 28, 28))
conv_node = relay.nn.conv2d(
data, kernel, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1)
)
bias_node = relay.nn.bias_add(conv_node, bias)
relu_node = relay.nn.relu(bias_node)
add_node = relay.add(relu_node, a)
relu_node_2 = relay.nn.relu(add_node)
r = relay.multiply(relu_node_2, b)
return relay.Function([data, kernel, bias, a, b], r)
def expected():
data = relay.var("data", shape=(1, 512, 28, 28))
kernel = relay.var("kernel", shape=(256, 512, 1, 1))
bias = relay.var("bias", shape=(256,))
a = relay.var("a", shape=(1, 256, 28, 28))
b = relay.var("b", shape=(1, 256, 28, 28))
in_1 = relay.var("in_1", shape=(1, 512, 28, 28))
in_2 = relay.var("in_2", shape=(256, 512, 1, 1))
in_3 = relay.var("in_3", shape=(256,))
conv_node = relay.nn.conv2d(in_1, in_2, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1))
bias_node = relay.nn.bias_add(conv_node, in_3)
r = relay.nn.relu(bias_node)
conv_bias_add_relu = relay.Function([in_1, in_2, in_3], r)
conv_bias_add_relu = conv_bias_add_relu.with_attr("Composite", "conv2d_bias_relu")
conv_bias_add_relu = conv_bias_add_relu.with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
in_4 = relay.var("in_4", shape=(1, 256, 28, 28))
in_5 = relay.var("in_5", shape=(1, 256, 28, 28))
add_node = relay.add(in_4, in_5)
r = relay.nn.relu(add_node)
add_relu = relay.Function([in_4, in_5], r)
add_relu = add_relu.with_attr("Composite", "add_relu")
add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")
conv_bias_add_relu_1 = relay.Call(conv_bias_add_relu, [data, kernel, bias])
add_relu_1 = relay.Call(add_relu, [conv_bias_add_relu_1, a])
r = relay.multiply(add_relu_1, b)
return relay.Function([data, kernel, bias, a, b], r)
check_result(pattern_table, before(), expected())
def test_optional_pattern():
r"""Test the pattern with optional operators. We can define a pattern with some operators
optional. The merge composite pass will create composite functions for all matched patterns,
but with different "PartitionedFromPattern" attribute. We expect the backend codegen to
analyze that attribute and determine the corresponding action.
Pattern: Matched Case A: Matched Case B:
conv2d conv2d conv2d
| | |
bias_add bias_add bias_add
| |
(relu) relu
In the above example, the composite function for matched case A would have
PartitionedFromPattern="nn.conv2d_nn.bias_add_nn.relu_" while the one for matched case B
would be "nn.conv2d_nn.bias_add_".
"""
pattern_table = [("layer", make_pattern_with_optional())]
def before():
x = relay.var("x", shape=(1, 3, 7, 7))
w1 = relay.var("w", shape=(3, 3, 1, 1))
b1 = relay.var("b", shape=(3,))
w2 = relay.var("w", shape=(3, 3, 1, 1))
b2 = relay.var("b", shape=(3,))
conv = relay.nn.conv2d(x, w1, kernel_size=(1, 1))
bias = relay.nn.bias_add(conv, b1)
relu = relay.nn.relu(bias)
conv = relay.nn.conv2d(relu, w2, kernel_size=(1, 1))
bias = relay.nn.bias_add(conv, b2)
return relay.Function([x, w1, w2, b1, b2], bias)
def expected():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
conv = relay.nn.conv2d(x, w, kernel_size=(1, 1))
bias = relay.nn.bias_add(conv, b)
relu = relay.nn.relu(bias)
func1 = relay.Function([x, w, b], relu)
func1 = func1.with_attr("Composite", "layer")
func1 = func1.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
conv = relay.nn.conv2d(x, w, kernel_size=(1, 1))
bias = relay.nn.bias_add(conv, b)
func2 = relay.Function([x, w, b], bias)
func2 = func2.with_attr("Composite", "layer")
func2 = func2.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_")
x = relay.var("x", shape=(1, 3, 7, 7))
w1 = relay.var("w", shape=(3, 3, 1, 1))
b1 = relay.var("b", shape=(3,))
w2 = relay.var("w", shape=(3, 3, 1, 1))
b2 = relay.var("b", shape=(3,))
out1 = func1(x, w1, b1)
out2 = func2(out1, w2, b2)
return relay.Function([x, w1, w2, b1, b2], out2)
check_result(pattern_table, before(), expected())
def test_merge_order():
r"""Test that patterns are merged in the order they exist in the pattern table.
There can be cases where one pattern is a subgraph of another, in which case
it is not clear which match should take priority. The priority should come
from the order in which the patterns are declared in the pattern table. The
first patterns will be merged with highest priority and the last with lowest.
A: B: C:
add add abs
| | |
abs abs relu
|
relu
"""
def pattern_A():
x = wildcard()
y = wildcard()
out = is_op("add")(x, y)
out = is_op("abs")(out)
out = is_op("nn.relu")(out)
return out
def pattern_B():
x = wildcard()
y = wildcard()
out = is_op("add")(x, y)
out = is_op("abs")(out)
return out
def pattern_C():
x = wildcard()
out = is_op("abs")(x)
out = is_op("nn.relu")(out)
return out
def before():
input_1 = relay.var("input_1", shape=(10, 10))
input_2 = relay.var("input_2", shape=(10, 10))
out = relay.add(input_1, input_2)
out = relay.abs(out)
out = relay.nn.relu(out)
return relay.Function([input_1, input_2], out)
def after_A_priority():
input_1 = relay.var("input_1", shape=(10, 10))
input_2 = relay.var("input_2", shape=(10, 10))
x = relay.var("x")
y = relay.var("y")
out = relay.add(x, y)
out = relay.abs(out)
out = relay.nn.relu(out)
merged_func = relay.Function([x, y], out)
merged_func = merged_func.with_attr("Composite", "A")
merged_func = merged_func.with_attr("PartitionedFromPattern", "add_abs_nn.relu_")
ret = relay.Call(merged_func, [input_1, input_2])
return relay.Function([input_1, input_2], ret)
def after_B_priority():
input_1 = relay.var("input_1", shape=(10, 10))
input_2 = relay.var("input_2", shape=(10, 10)) |
x = relay.var("x")
y = relay.var("y")
out = relay.add(x, y)
out = relay.abs(out)
merged_func = relay.Function([x, y], out)
merged_func = merged_func.with_attr("Composite", "B")
merged_func = merged_func.with_attr("PartitionedFromPattern", "add_abs_")
out = relay.Call(merged_func, [input_1, input_2])
ret = relay.nn.relu(out)
return relay.Function([input_1, input_2], ret)
def after_C_priority():
input_1 = relay.var("input_1", shape=(10, 10))
input_2 = relay.var("input_2", shape=(10, 10))
x = relay.var("x")
out = relay.abs(x)
out = relay.nn.relu(out)
merged_func = relay.Function([x], out)
merged_func = merged_func.with_attr("Composite", "C")
merged_func = merged_func.with_attr("PartitionedFromPattern", "abs_nn.relu_")
out = relay.add(input_1, input_2)
ret = relay.Call(merged_func, [out])
return relay.Function([input_1, input_2], ret)
pattern_table = [
("A", pattern_A()),
("B", pattern_B()),
("C", pattern_C()),
]
check_result(pattern_table, before(), after_A_priority())
pattern_table = [
("B", pattern_B()),
("C", pattern_C()),
("A", pattern_A()),
]
check_result(pattern_table, before(), after_B_priority())
pattern_table = [
("C", pattern_C()),
("A", pattern_A()),
("B", pattern_B()),
]
check_result(pattern_table, before(), after_C_priority())
def test_parallel_merge():
r"""Tests that parallel patterns relying on the same inputs are correctly merged.
The test graph is difficult to draw out as ascii art. It is essentially two parallel
add-sub-mul units which both consume input_1 and input_2 with their results being multiplied
to give the output. We expect both parallel branches should get merged and both should still
consume the same input variables, input_1 and input_2."""
def before():
input_1 = relay.var("input_1", shape=(10, 10))
input_2 = relay.var("input_2", shape=(10, 10))
branch_1_add = relay.add(input_1, input_2)
branch_1_sub = relay.subtract(input_1, input_2)
branch_1 = relay.multiply(branch_1_add, branch_1_sub)
branch_2_add = relay.add(input_1, input_2)
branch_2_sub = relay.subtract(input_1, input_2)
branch_2 = relay.multiply(branch_2_add, branch_2_sub)
out = relay.multiply(branch_1, branch_2)
return relay.Function([input_1, input_2], out)
def expected():
input_1 = relay.var("input_1", shape=(10, 10))
input_2 = relay.var("input_2", shape=(10, 10))
x = relay.var("x")
y = relay.var("y")
branch_1 = relay.multiply(relay.add(x, y), relay.subtract(x, y))
func_1 = relay.Function([x, y], branch_1)
func_1 = func_1.with_attr("Composite", "add_sub_mul")
func_1 = func_1.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
call_1 = relay.Call(func_1, [input_1, input_2])
x1 = relay.var("x1")
y1 = relay.var("y1")
branch_2 = relay.multiply(relay.add(x1, y1), relay.subtract(x1, y1))
func_2 = relay.Function([x1, y1], branch_2)
func_2 = func_2.with_attr("Composite", "add_sub_mul")
func_2 = func_2.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
call_2 = relay.Call(func_2, [input_1, input_2])
out = relay.multiply(call_1, call_2)
return relay.Function([input_1, input_2], out)
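    # For reference, a pattern equivalent to the "add_sub_mul" helper used here could be
    # built with the dataflow pattern language (a sketch; the actual helper is defined
    # earlier in this file):
    #   x, y = wildcard(), wildcard()
    #   pattern = is_op("multiply")(is_op("add")(x, y), is_op("subtract")(x, y))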
pattern_table = [("add_sub_mul", make_add_sub_mul_pattern())]
check_result(pattern_table, before(), expected())
def test_multiple_input_subgraphs():
r"""Test the case when multiple input subgraphs feed into another subgraph.
(1) (2) (3) (4)
add add add add
| | | |
relu relu relu relu
\ / \ /
\ / \ /
add sub
\ /
\ /
\ / |
mul
----> When 1=3 and 2=4 (Case 'A')
add_relu add_relu
\ /
\ /
add_sub_mul
----> When 1!=3 and 2!=4 (Case 'B')
add_relu add_relu add_relu add_relu
\ / \ /
\ / \ /
add sub
\ /
-------- -----
\ /
mul
The difference in behaviour comes from the fact that add_sub_mul expects that the
inputs to add and sub are identical (the same two relay expressions). So when you
have 4 independent inputs, the pattern should not be merged.
"""
def before():
before_funcs = {}
inputs = [relay.var("input_" + str(i), shape=(10, 10)) for i in range(8)]
add_relu_1 = relay.add(inputs[0], inputs[1])
add_relu_1 = relay.nn.relu(add_relu_1)
add_relu_2 = relay.add(inputs[2], inputs[3])
add_relu_2 = relay.nn.relu(add_relu_2)
add_relu_3 = relay.add(inputs[4], inputs[5])
add_relu_3 = relay.nn.relu(add_relu_3)
add_relu_4 = relay.add(inputs[6], inputs[7])
add_relu_4 = relay.nn.relu(add_relu_4)
add = relay.add(add_relu_1, add_relu_2)
sub = relay.subtract(add_relu_3, add_relu_4)
out = relay.multiply(add, sub)
before_funcs["B"] = relay.Function(inputs, out)
sub = relay.subtract(add_relu_1, add_relu_2)
out = relay.multiply(add, sub)
before_funcs["A"] = relay.Function(inputs[:4], out)
return before_funcs
def after_A():
inputs = [relay.var("input_" + str(i), shape=(10, 10)) for i in range(4)]
x = relay.var("x")
y = relay.var("y")
add_relu_1 = relay.add(x, y)
add_relu_1 = relay.nn.relu(add_relu_1)
add_relu_1 = relay.Function([x, y], add_relu_1)
add_relu_1 = add_relu_1.with_attr("Composite", "add_relu")
add_relu_1 = add_relu_1.with_attr("PartitionedFromPattern", "add_nn.relu_")
        add_relu_call_1 = relay.Call(add_relu_1, [inputs[0], inputs[1]])
x1 = relay.var("x1")
y1 = relay.var("y1")
add_relu_2 = relay.add(x1, y1)
add_relu_2 = relay.nn.relu(add_relu_2)
add_relu_2 = relay.Function([x1, y1], add_relu_2)
add_relu_2 = add_relu_2.with_attr("Composite", "add_relu")
add_relu_2 = add_relu_2.with_attr("PartitionedFromPattern", "add_nn.relu_")
add_relu_call_2 = relay.Call(add_relu_2, [inputs[2], inputs[3]])
x2 = relay.var("x2")
y2 = relay.var("y2")
add = relay.add(x2, y2)
sub = relay.subtract(x2, y2)
add_sub_mul = relay.multiply(add, sub)
add_sub_mul = relay.Function([x2, y2], add_sub_mul)
add_sub_mul = add_sub_mul.with_attr("Composite", "add_sub_mul")
add_sub_mul = add_sub_mul.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
add_sub_mul_call = relay.Call(add_sub_mul, [add_relu_call_1, add_relu_call_2])
return relay.Function(inputs, add_sub_mul_call)
def after_B():
inputs = [relay.var("input_" + str(i), shape=(10, 10)) for i in range(8)]
add_relu_calls = []
for i in range(4):
x = relay.var("x" + str(i))
y = relay.var("x" + str(i))
add_relu = relay.add(x, y)
add_relu = relay.nn.relu(add_relu)
add_relu = relay.Function([x, y], add_relu)
add_relu = add_relu.with_attr("Composite", "add_relu")
add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")
add_relu_call = relay.Call(add_relu, [inputs[i * 2], inputs[i * 2 + 1]])
add_relu_calls.append(add_relu_call)
add = relay.add(add_relu_calls[0], add_relu_calls[1])
sub = relay.subtract(add_relu_calls[2], add_relu_calls[3])
out = relay.multiply(add, sub)
return relay.Function(inputs, out)
pattern_table = [
("add_sub_mul", make_add_sub_mul_pattern()),
("add_relu", make_add_relu_pattern()),
]
    check_result(pattern_table, before()["A"], after_A())
check_result(pattern_table, before()["B"], after_B())
def test_tuple_get_item_merge():
"""Test composite function can be merged from pattern containing TupleGetItem nodes."""
pattern_table = [("bn_relu", make_bn_relu_pattern())]
def before():
x = relay.var("x", shape=(1, 8))
gamma = relay.var("gamma", shape=(8,))
beta = relay.var("beta", shape=(8,))
moving_mean = relay.var("moving_mean", shape=(8,))
moving_var = relay.var("moving_var", shape=(8,))
bn_node = relay.nn.batch_norm(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = bn_node[0]
r = relay.nn.relu(tuple_get_item_node)
return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
def expected():
x = relay.var("x", shape=(1, 8))
beta = relay.var("beta", shape=(8,))
gamma = relay.var("gamma", shape=(8,))
moving_mean = relay.var("moving_mean", shape=(8,))
moving_var = relay.var("moving_var", shape=(8,))
in_1 = relay.var("x1", shape=(1, 8))
in_2 = relay.var("gamma1", shape=(8,))
in_3 = relay.var("beta1", shape=(8,))
in_4 = relay.var("moving_mean1", shape=(8,))
in_5 = relay.var("moving_var1", shape=(8,))
bn_node = relay.nn.batch_norm(in_1, in_2, in_3, in_4, in_5)
tuple_get_item_node = bn_node[0]
relu_node = relay.nn.relu(tuple_get_item_node)
bn_relu = relay.Function([in_1, in_2, in_3, in_4, in_5], relu_node)
bn_relu = bn_relu.with_attr("Composite", "bn_relu")
bn_relu = bn_relu.with_attr(
"PartitionedFromPattern", "nn.batch_norm_TupleGetItem0_nn.relu_"
)
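        # The PartitionedFromPattern attribute records every matched node in order;
        # TupleGetItem nodes appear with their index (0 here, batch_norm's normalized output).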
r = relay.Call(bn_relu, [x, gamma, beta, moving_mean, moving_var])
return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
check_result(pattern_table, before(), expected())
def test_pattern_with_check():
def before():
x = relay.var("x", shape=(1, 10, |
10, 10))
w = relay.var("w", shape=(10, 10, 3, 3))
b = relay.var("b", shape=(8,))
conv = relay.nn.conv2d(x, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC")
bias = relay.nn.bias_add(conv, b)
relu = relay.nn.relu(bias)
return relay.Function([x, w, b], relu)
def _check_true(extract):
conv = extract.args[0].args[0]
return conv.attrs.data_layout == "NHWC"
def _check_false(extract):
conv = extract.args[0].args[0]
return conv.attrs.data_layout == "NCHW"
def expected():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
conv = relay.nn.conv2d(x, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC")
bias = relay.nn.bias_add(conv, b)
relu = relay.nn.relu(bias)
func = relay.Function([x, w, b], relu)
func = func.with_attr("Composite", "conv_bias_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
x = relay.var("x", shape=(1, 10, 10, 10))
w = relay.var("w", shape=(10, 10, 3, 3))
b = relay.var("b", shape=(8,))
return relay.Function([x, w, b], func(x, w, b))
pattern_table_false = [("conv_bias_relu", make_conv_bias_relu_pattern(), _check_false)]
check_result(pattern_table_false, before(), before())
pattern_table_true = [("conv_bias_relu", make_conv_bias_relu_pattern(), _check_true)]
check_result(pattern_table_true, before(), expected())
def test_diamond_not_merge():
r"""
The pattern on the left shouldn't match the structure on the right
relu relu
| \ | \
| clip | add
| / | |
mul | clip
| /
mul
"""
def get_pattern():
conv = make_conv_bias_relu_pattern()
clip = is_op("clip")(conv, wildcard(), wildcard())
return is_op("multiply")(conv, clip)
def get_net():
        data = relay.var("data", shape=(1, 512, 28, 28))
kernel = relay.var("kernel", shape=(256, 512, 1, 1))
conv = relay.nn.conv2d(data, kernel, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1))
bias = relay.nn.bias_add(conv, relay.var("bias", shape=(256,)))
relu = relay.nn.relu(bias)
add = relay.op.add(relu, relay.const(1.0))
clip2 = relay.op.clip(add, 0, 255)
mul = relay.op.multiply(relu, clip2)
return relay.Function(relay.analysis.free_vars(mul), mul)
pattern_table = [("pat", get_pattern())]
net = get_net()
check_result(pattern_table, net, net)
def test_type_check():
"""Test that we can query tensor types in the 'check' function."""
def before():
x = relay.var("x", shape=(1, 10, 10, 10))
w = relay.var("w", shape=(10, 10, 3, 3))
b = relay.var("b", shape=(8,))
add = relay.op.add(x, x)
relu = relay.nn.relu(add)
conv = relay.nn.conv2d(
relu, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC"
)
bias = relay.nn.bias_add(conv, b)
relu2 = relay.nn.relu(bias)
return run_opt_pass(relay.Function([x, w, b], relu2), relay.transform.InferType())
def expected_false():
x = relay.var("x", shape=(1, 10, 10, 10))
w = relay.var("w", shape=(10, 10, 3, 3))
b = relay.var("b", shape=(8,))
x0 = relay.var("x")
add = relay.op.add(x0, x0)
relu = relay.nn.relu(add)
func = relay.Function([x0], relu)
func = func.with_attr("PartitionedFromPattern", "add_nn.relu_")
func = func.with_attr("Composite", "add_relu")
call = relay.Call(func, [x])
conv = relay.nn.conv2d(
call, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC"
)
bias = relay.nn.bias_add(conv, b)
relu2 = relay.nn.relu(bias)
return relay.Function([x, w, b], relu2)
def expected_true():
x = relay.var("x", shape=(1, 10, 10, 10))
        w = relay.var("w", shape=(10, 10, 3, 3))
b = relay.var("b", shape=(8,))
x0 = relay.var("x")
add = relay.op.add(x0, x0)
relu = relay.nn.relu(add)
func = relay.Function([x0], relu)
func = func.with_attr("PartitionedFromPattern", "add_nn.relu_")
func = func.with_attr("Composite", "add_relu")
call = relay.Call(func, [x])
x2 = relay.var("x")
w1 = relay.var("w")
b1 = relay.var("b")
conv = relay.nn.conv2d(x2, w1, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC")
bias = relay.nn.bias_add(conv, b1)
relu2 = relay.nn.relu(bias)
func = relay.Function([x2, w1, b1], relu2)
func = func.with_attr("Composite", "conv_bias_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
call = relay.Call(func, [call, w, b])
return relay.Function([x, w, b], call)
def _check_type_true(extract):
conv = extract.args[0].args[0]
typ = conv.checked_type
return bool(typ.shape[0] == 1)
def _check_type_false(extract):
conv = extract.args[0].args[0]
typ = conv.checked_type
return bool(typ.shape[0] != 1)
pattern_table_false = [
("add_relu", make_add_relu_pattern()),
("conv_bias_relu", make_conv_bias_relu_pattern(), _check_type_false),
]
check_result(pattern_table_false, before(), expected_false())
pattern_table_true = [
("add_relu", make_add_relu_pattern()),
("conv_bias_relu", make_conv_bias_relu_pattern(), _check_type_true),
]
check_result(pattern_table_true, before(), expected_true())
if __name__ == "__main__":
    pytest.main([__file__])
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay import op, create_executor, transform
from tvm.relay import Var, TypeVar, TupleGetItem, Let, Function, const, RefRead, RefWrite, RefCreate
from tvm.relay import TensorType, Tuple, If, Clause, PatternConstructor, PatternVar, Match
from tvm.relay import GlobalVar, Call
from tvm.relay.transform import gradient
from tvm.relay.testing import make_nat_expr, run_infer_type
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
dev = tvm.device("llvm", 0)
result = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)
np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def tipe(expr):
return run_opt_pass(expr, [transform.PartialEvaluate(), transform.InferType()])
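# dcpe = partial evaluation followed by dead code elimination. inline_once=True lets DCE
# inline let-bindings that are used exactly once, and ignore_impurity=True allows bindings
# whose values may have side effects (e.g. reference writes) to be removed, which several
# of the Ref-based tests below rely on.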
def dcpe(expr, mod=None, grad=False, ignore_impurity=False):
passes = [
transform.PartialEvaluate(),
transform.InferType(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=ignore_impurity),
transform.InferType(),
]
if grad:
expr = gradient(run_infer_type(expr))
if mod:
assert isinstance(expr, Function)
mod["main"] = expr
seq = tvm.transform.Sequential(passes)
mod = seq(mod)
return mod["main"]
return run_opt_pass(expr, passes)
def test_tuple():
t = TypeVar("t")
x = Var("x", t)
body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1)
f = Function([x], body, None, [t])
expected = relay.Function([x], x, None, [t])
expected = run_opt_pass(expected, transform.InferType())
assert tvm.ir.structural_equal(dcpe(f), expected)
def test_const_inline():
t = relay.TensorType([], "float32")
d = Var("d", t)
double = Function([d], d + d)
orig = double(const(4.0))
assert tvm.ir.structural_equal(dcpe(orig), const(8.0))
def test_ref():
t = relay.TensorType([], "float32")
d = relay.Var("d", t)
r = relay.Var("r", relay.RefType(t))
x = relay.Var("x")
body = relay.RefRead(r)
    body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body)
body = Let(r, RefCreate(d), body)
square = Function([d], body)
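    # The partial evaluator tracks the reference's contents, so the RefCreate/RefWrite/
    # RefRead chain collapses to d * d; ignore_impurity=True lets DCE drop the now-dead
    # reference operations.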
expected = run_opt_pass(Function([d], d * d), transform.InferType())
actual = dcpe(square, ignore_impurity=True)
assert tvm.ir.structural_equal(actual, expected)
def test_empty_ad():
shape = (10, 10)
dtype = "float32"
t = TensorType(shape, dtype)
d = Var("d", t)
f = Function([d], d)
g = dcpe(f, grad=True, ignore_impurity=True)
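    # grad=True wraps f with the gradient transform, which yields a function returning a
    # tuple of (original output, tuple of gradients w.r.t. the inputs); for the identity
    # function the gradient is simply ones_like(d).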
expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])]))
expected = run_opt_pass(expected, transform.InferType())
assert tvm.ir.structural_equal(g, expected)
def test_ad():
shape = (10, 10)
dtype = "float32"
t = TensorType(shape, dtype)
d = Var("d", t)
f = Function([d], d * d)
g = dcpe(f, grad=True, ignore_impurity=True)
m = d * d
x = relay.Var("x")
o = op.ones_like(x)
x1 = relay.Var("x1")
grad = op.zeros_like(d) + op.collapse_sum_like(x1 * d, d) + op.collapse_sum_like(x1 * d, d)
body = Tuple([x, Tuple([grad])])
body = relay.Let(x1, o, body)
expected = Function([d], relay.Let(x, m, body))
expected = run_opt_pass(expected, transform.InferType())
tvm.ir.assert_structural_equal(g, expected)
def test_if_ref():
shape = ()
dtype = "bool"
t = TensorType(shape, dtype)
d = Var("d", t)
r = Var("r")
update = Function([], RefWrite(r, RefRead(r) + RefRead(r)))
u = Var("u")
body = If(d, u(), u())
eff = Var("eff")
body = Let(eff, body, RefRead(r))
f = Function([d], Let(r, RefCreate(const(1)), Let(u, update, body)))
pe_f = tipe(f)
f_res = create_executor().evaluate(f)(const(True))
pe_f_res = create_executor().evaluate(pe_f)(const(True))
np.testing.assert_allclose(f_res.numpy(), 2 * np.ones_like(f_res.numpy()))
np.testing.assert_allclose(pe_f_res.numpy(), 2 * np.ones_like(pe_f_res.numpy()))
def test_function_invalidate():
shape = ()
dtype = "bool"
t = TensorType(shape, dtype)
d = Var("d", t)
r = Var("r")
    fetch = Function([], RefRead(r))
fet = Var("fetch")
fet_obscured = Var("fetch_obscured")
u = Var("u")
body = If(d, fet_obscured(), fet_obscured())
body = Let(u, RefWrite(r, const(1)), body)
body = Let(fet_obscured, If(d, fet, fet), body)
body = Let(fet, fetch, body)
body = Let(r, RefCreate(const(0)), body)
f = Function([d], body)
pe_f = tipe(f)
f_res = create_executor().evaluate(f)(const(True))
pe_f_res = create_executor().evaluate(pe_f)(const(True))
np.testing.assert_allclose(f_res.numpy(), np.ones_like(f_res.numpy()))
np.testing.assert_allclose(pe_f_res.numpy(), np.ones_like(pe_f_res.numpy()))
def test_head_cons():
mod = tvm.IRModule()
p = Prelude(mod)
t = TypeVar("t")
x = Var("x", t)
rlist, cons, nil = p.mod.get_type("List")
hd = p.mod.get_global_var("hd")
body = hd(cons(x, nil()))
f = Function([x], body, None, [t])
res = dcpe(f, mod)
expected_mod = tvm.IRModule.from_expr(Function([x], x, t, [t]))
assert tvm.ir.structural_equal(res, expected_mod["main"])
def test_map():
mod = tvm.IRModule()
p = Prelude(mod)
rlist, cons, nil = p.mod.get_type("List")
rmap = p.mod.get_global_var("map")
f = GlobalVar("f")
t = TypeVar("t")
a = Var("a", t)
mod[f] = Function([a], a, t, [t])
orig = rmap(f, cons(const(1), cons(const(2), cons(const(3), nil()))))
expected = cons((const(1)), cons((const(2)), cons((const(3)), nil())))
expected = Function([], expected)
mod["main"] = expected
mod = transform.InferType()(mod)
expected = mod["main"]
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, expected.body)
def test_loop():
mod = tvm.IRModule()
t = TypeVar("t")
x = Var("x", t)
loop = GlobalVar("loop")
mod[loop] = Function([x], loop(x), t, [t])
expected = Call(loop, [const(1)])
mod["main"] = Function([], expected)
mod = transform.InferType()(mod)
expected = mod["main"].body
    call = Function([], loop(const(1)))
res = dcpe(call, mod=mod)
assert tvm.ir.structural_equal(res.body, expected)
def test_swap_loop():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, _, _ = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
loop = GlobalVar("loop")
mod[loop] = Function([x, y], loop(y, x), nat())
prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
res = Function([], prog)
res = dcpe(res, mod=mod)
assert tvm.ir.structural_equal(prog, res.body)
def test_abs_diff():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
xp = Var("x'", nat())
yp = Var("y'", nat())
diff = GlobalVar("diff")
y_z_case = Clause(PatternConstructor(z, []), x)
y_s_case = Clause(PatternConstructor(s, [PatternVar(yp)]), diff(yp, xp))
x_z_case = Clause(PatternConstructor(z, []), y)
x_s_case = Clause(PatternConstructor(s, [PatternVar(xp)]), Match(y, [y_z_case, y_s_case]))
mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
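    # diff computes the absolute difference of two Peano naturals: each recursive step
    # strips one successor from both arguments (swapping them), and once either argument
    # hits zero the other is returned, so diff(7, 3) evaluates to 4.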
orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 4))
def test_match_nat_id():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
nat_id = GlobalVar("nat_id")
z_case = Clause(PatternConstructor(z, []), z())
s_case = Clause(PatternConstructor(s, [PatternVar(y)]), s(y))
mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
orig = nat_id(make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_nat_id():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
    nat, _, _ = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
nat_id = GlobalVar("nat_id")
mod[nat_id] = Function([x], x)
orig = nat_id(make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_global_match_nat_id():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
x = Var("x", nat())
z_case = Clause(PatternConstructor(z, []), z())
s_case = Clause(PatternConstructor(s, [PatternVar(x)]), s(x))
orig = Match(make_nat_expr(p, 3), [z_case, s_case])
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_double():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
double = p.mod.get_global_var("nat_double")
orig = double(make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 6))
def test_concat():
t = relay.TensorType([10], "float32")
x = Var("x", t)
y = Var("x", t)
orig = run_infer_type(Function([x, y], op.concatenate([x, y], axis=0)))
tvm.ir.assert_structural_equal(dcpe(orig), orig)
def test_triangle_number():
t = relay.TensorType([], "int32")
x = Var("x", t)
f_var = Var("f")
f = Function([x], If(op.equal(x, const(0)), const(0), x + f_var(x - const(1))))
orig = run_infer_type(Let(f_var, f, f_var(const(10))))
tvm.ir.assert_structural_equal(dcpe(orig), const(55))
def test_nat_update():
m = tvm.IRModule()
p = Prelude(m)
p.mod.import_from_std("nat.rly")
m = transform.ToANormalForm()(m)
transform.PartialEvaluate()(m)
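    # Smoke test: this only checks that partial evaluation runs on the A-normal-form nat
    # prelude without raising; there is nothing to assert on.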
def test_tuple_match():
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
    x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
tvm.ir.assert_structural_equal(dcpe(x), const(2))
if __name__ == "__main__":
    tvm.testing.main()
"""Unit tests for graph partitioning."""
import os
import sys
import numpy as np
import tvm
from tvm.relay.backend import te_compiler
from tvm.relay.backend.runtime import Runtime