assert isinstance(ty_pat, TypePattern)
assert ty_pat.type == ttype
def test_DataTypePattern():
dtype = "float16"
pattern = has_dtype(dtype)
assert isinstance(pattern, DataTypePattern)
assert pattern.dtype == dtype
def test_ShapePattern():
shape = [10, 10]
pattern = has_shape(shape)
assert isinstance(pattern, ShapePattern)
assert tvm.ir.structural_equal(pattern.shape, shape)
def test_AttrPattern():
op = is_op("add").has_attr({"TOpPattern": K_ELEMWISE})
assert isinstance(op, AttrPattern)
assert op.attrs["TOpPattern"] == K_ELEMWISE
def test_IfPattern():
x = is_var("x")
y = is_var("y")
pat = is_if(is_op("less")(x, y), x, y)
assert isinstance(pat, IfPattern)
assert isinstance(pat.cond, CallPattern)
assert isinstance(pat.true_branch, VarPattern)
assert isinstance(pat.false_branch, VarPattern)
def test_LetPattern():
x = is_var("x")
y = is_var("y")
let_var = is_var("let")
pat = is_let(let_var, is_op("less")(x, y), let_var)
assert isinstance(pat, LetPattern)
assert isinstance(pat.var, VarPattern)
assert isinstance(pat.value, CallPattern)
assert isinstance(pat.body, VarPattern)
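# The constructor tests above only inspect the pattern objects themselves; the
# tests below exercise DFPattern.match(expr), which returns True when the
# pattern structurally matches a Relay expression.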
def test_match_op():
assert is_op("add").match(relay.op.op.get("add"))
def test_no_match_op():
assert not is_op("add").match(relay.op.op.get("subtract"))
def test_match_op_or():
is_add_or_sub = is_op("add") | is_op("subtract")
assert is_add_or_sub.match(relay.op.op.get("add"))
assert is_add_or_sub.match(relay.op.op.get("subtract"))
def test_match_call_commutive():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(is_var("x"), is_var("y"))
assert add_pattern.match(x + y)
assert add_pattern.match(y + x)
mul_pattern = is_op("multiply")(is_var("x"), is_var("y"))
assert mul_pattern.match(x * y)
assert mul_pattern.match(y * x)
def test_no_match_call_commutive():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("subtract")(is_var("x"), is_var("y"))
assert add_pattern.match(x - y)
assert not add_pattern.match(y - x)
add_pattern = is_op("divide")(is_var("x"), is_var("y"))
assert add_pattern.match(x / y)
assert not add_pattern.match(y / x)
def test_match_call():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
assert add_pattern.match(x + y)
call_pattern = wildcard()(None)
assert call_pattern.match(relay.op.nn.relu(x))
assert call_pattern.match(relay.op.add(x, y))
def test_no_match_call():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
assert not add_pattern.match(x - y)
def test_match_func():
x = relay.var("x")
y = relay.var("y")
wc1 = wildcard()
wc2 = wildcard()
func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
assert func_pattern.match(relay.Function([x, y], x + y))
func_pattern = FunctionPattern(None, wildcard())
assert func_pattern.match(relay.Function([x], x))
assert func_pattern.match(relay.Function([x, y], x + y))
def test_no_match_func():
x = relay.var("x")
y = relay.var("y")
wc1 = wildcard()
wc2 = wildcard()
func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
assert not func_pattern.match(relay.Function([x, y], x - y))
def test_match_if():
x = is_var("x")
y = is_var("y")
pat = is_if(is_op("less")(x, y), x, y)
x = relay.var("x")
y = relay.var("y")
cond = x < y
assert pat.match(relay.expr.If(cond, x, y))
def test_no_match_if():
x = is_var("x")
y = is_var("y")
pat = is_if(is_op("less")(x, y), x, y)
x = relay.var("x")
y = relay.var("y")
assert not pat.match(relay.expr.If(x > y, x, y))
assert not pat.match(relay.expr.If(x < y, y, x))
def test_match_let():
x = is_var("x")
y = is_var("y")
let_var = is_var("let")
pat = is_let(let_var, is_op("less")(x, y), let_var)
x = relay.var("x")
y = relay.var("y")
lv = relay.var("let")
cond = x < y
assert pat.match(relay.expr.Let(lv, cond, lv))
def test_no_match_let():
x = is_var("x")
y = is_var("y")
let_var = is_var("let")
pat = is_let(let_var, is_op("less")(x, y), let_var)
x = relay.var("x")
y = relay.var("y")
lv = relay.var("let")
assert not pat.match(relay.expr.Let(lv, x > y, lv))
assert not pat.match(relay.expr.Let(lv, x < y, lv * x))
def test_match_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
pattern = is_op("nn.relu")(
is_op("nn.conv2d")(wildcard(), wildcard()).optional(
lambda x: is_op("nn.bias_add")(x, wildcard())
)
)
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
assert pattern.match(relu)
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
assert pattern.match(relu)
pattern = is_op("nn.conv2d")(wildcard(), wildcard())
pattern = pattern.optional(is_op("nn.relu")).optional(is_op("tanh"))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
tanh = relay.op.tanh(conv2d)
tanh2 = relay.op.tanh(relu)
relu2 = relay.op.nn.relu(tanh)
assert pattern.match(conv2d)
assert pattern.match(relu)
assert pattern.match(tanh)
assert pattern.match(tanh2)
assert not pattern.match(relu2)
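# Note on optional(): pattern.optional(f) matches with or without the wrapped
# op, and chained optionals must appear in the chained order, which is why
# tanh(relu(conv2d)) above matches while relu(tanh(conv2d)) does not.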
def test_no_match_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
pattern = is_op("nn.relu")(
is_op("nn.conv2d")(wildcard(), wildcard()).optional(
lambda x: is_op("nn.bias_add")(x, wildcard())
)
)
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.tanh(conv2d)
assert not pattern.match(relu)
conv2d = relay.op.nn.dense(x, w)
relu = relay.op.tanh(conv2d)
assert not pattern.match(relu)
conv2d = relay.op.nn.dense(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
assert not pattern.match(relu)
conv2d = relay.op.nn.conv2d(x, w)
bias_add = conv2d + w
relu = relay.op.nn.relu(bias_add)
assert not pattern.match(relu)
def test_match_const():
conv2d = is_op("nn.conv2d")(wildcard(), is_constant())
pattern = is_op("nn.bias_add")(conv2d, wildcard())
x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.var("w", shape=(3, 3, 3, 3))
b = relay.var("b", shape=(3,))
conv2d = relay.op.nn.conv2d(x, w)
out = relay.op.nn.bias_add(conv2d, b)
func = relay.Function([x, w, b], out)
mod = tvm.IRModule.from_expr(func)
assert not pattern.match(mod["main"].body)
mod["main"] = bind_params_by_name(mod["main"], {"w": tvm.nd.array(np.ones(shape=(3, 3, 3, 3)))})
assert pattern.match(mod["main"].body)
def test_match_tuple():
x = relay.var("x")
y = relay.var("y")
z = relay.op.op.get("add")
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
assert tuple_pattern.match(relay.expr.Tuple((x, y, z)))
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern)
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 0))
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
tuple_pattern = is_tuple(None)
concat_pattern = is_op("concatenate")(tuple_pattern)
assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x,)), axis=0))
assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x, y)), axis=0))
assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x, y, z)), axis=0))
def test_no_match_tuple():
x = relay.var("x")
y = relay.var("y")
z = relay.op.op.get("add")
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add"), wildcard()))
assert not tuple_pattern.match(relay.expr.Tuple((x, y, z)))
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
assert not tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
def test_match_type():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_type(relay.TensorType((10, 10), "float32"))
assert ty_pat.match(x)
def test_no_match_type():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_type(relay.TensorType((10, 10), "float32"))
assert not ty_pat.match(x)
def test_match_dtype():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_dtype("float32")
assert ty_pat.match(x)
def test_no_match_dtype():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_dtype("float32")
assert not ty_pat.match(x)
def test_match_shape():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_shape((10, 10))
assert ty_pat.match(x)
def test_no_match_shape():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_shape((10, 5))
assert not ty_pat.match(x)
def test_match_op_attr():
op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert op_pat.match(x + y)
def test_no_match_op_attr():
op = is_op("nn.dense").has_attr({"TOpPattern": K_ELEMWISE})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert not op_pat.match(relay.op.nn.dense(x, y))
op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert not op_pat.match(x - y)
z = relay.var("z")
assert not op_pat.match(relay.Let(z, x + y, z))
def test_match_func_attr():
pattern = wildcard().has_attr({"Composite": "add"})
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("Composite", "add")
assert pattern.match(f)
def test_no_match_func_attr():
pattern = wildcard().has_attr({"Composite": "add"})
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("RandomTest", "add")
assert not pattern.match(f)
f = relay.Function([x, y], x + y).with_attr("Composite", "conv_bias")
assert not pattern.match(f)
def test_match_call_attr():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NCHW"})
x = relay.var("x")
y = relay.var("y")
assert is_conv2d.match(relay.op.nn.conv2d(x, y))
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"kernel_size": [3, 3]})
out = relay.op.nn.conv2d(x, y, kernel_size=[3, 3])
assert is_conv2d.match(out)
attr_dict = {"call_attr": "attr"}
call_has_attr = wildcard()(wildcard()).has_attr(attr_dict)
call_attr = tvm.ir.make_node("DictAttrs", **attr_dict)
a = relay.Var("a")
b = relay.Var("b")
assert call_has_attr.match(relay.Call(a, [b], attrs=call_attr))
empty_attrs = tvm.ir.make_node("DictAttrs", **{})
call_has_empty_attrs = wildcard()(wildcard()).has_attr({})
assert call_has_empty_attrs.match(relay.Call(a, [b], attrs=empty_attrs))
assert call_has_empty_attrs.match(relay.Call(a, [b], attrs=call_attr))
def test_no_match_call_attr():
x = relay.var("x")
y = relay.var("y")
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NHWC"})
assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"RandomAttr": "NCHW"})
assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"kernel_size": [3, 3]})
out = relay.op.nn.conv2d(x, y, kernel_size=[2, 1])
assert not is_conv2d.match(out)
call_has_attr = wildcard()(wildcard()).has_attr({"call_attr": "attr"})
wrong_key = tvm.ir.make_node("DictAttrs", **{"wrong": "attr"})
wrong_value = tvm.ir.make_node("DictAttrs", **{"call_attr": "wrong"})
empty_attrs = tvm.ir.make_node("DictAttrs", **{})
a = relay.Var("a")
b = relay.Var("b")
assert not call_has_attr.match(relay.Call(a, [b]))
assert not call_has_attr.match(relay.Call(a, [b], attrs=wrong_key))
assert not call_has_attr.match(relay.Call(a, [b], attrs=wrong_value))
assert not call_has_attr.match(relay.Call(a, [b], attrs=empty_attrs))
def test_match_call_attr_dtype():
is_cast = is_op("cast")(wildcard()).has_attr({"dtype": "float32"})
x = relay.var("x")
assert is_cast.match(relay.op.cast(x, "float32"))
def test_match_diamond():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
assert diamond.match(out)
def test_no_match_diamond():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
assert not diamond.match(leaky_relu)
assert not diamond.match(relu)
def test_match_fake_diamond():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
assert not diamond.match(out)
def test_at_most_one_parent():
P = is_op("nn.conv2d")(wildcard(), wildcard())
I = is_op("nn.relu")(wildcard())
C = is_op("add")(wildcard(), wildcard())
pattern = dominates(P, I, C)
x = relay.var("x")
w = relay.var("w")
n6 = relay.op.nn.conv2d(x, w)
n7 = relay.op.tanh(n6)
n8 = relay.op.nn.conv2d(n7, w)
n9 = relay.op.nn.relu(n8)
n10 = relay.op.nn.relu(n6)
n11 = relay.add(n9, n10)
assert not pattern.match(n11)
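# dominates(parent, path, child) matches a child node whose inputs all trace
# back to a single match of the parent pattern, with every intermediate node
# matching the fuzzy, variable-length path pattern; the test above fails
# because the add is reachable from two different conv2d matches rather than a
# single dominating parent.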
def test_match_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
assert diamond.match(out)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
assert diamond.match(out)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
assert diamond.match(out)
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
"add"
)(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = tanh + leaky_relu
assert diamond.match(out)
def test_not_match_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
assert not diamond.match(out)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
assert not diamond.match(out)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(inp)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
assert not diamond.match(out)
inp = relay.var("input")
relu = relay.op.nn.relu(inp)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
assert not diamond.match(out)
def test_match_typed_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float32"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
assert diamond.match(out)
def test_no_match_typed_dominator():
inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float32"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 1, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
assert not diamond.match(out)
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float16"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
assert not diamond.match(out)
def test_rewrite():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
sub_pattern = is_op("subtract")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
out = rewrite(TestRewrite(), x + y)
assert sub_pattern.match(out)
def test_rewrite_func():
x = relay.var("x")
w = relay.var("w")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
sub_pattern = is_op("subtract")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
inpf = relay.var("input")
weightf = relay.var("weight")
func = relay.Function(
[inpf, weightf], relay.op.nn.relu(relay.op.nn.conv2d(inpf, weightf)), attrs=None
)
out = rewrite(TestRewrite(), func(x, w) + y)
assert sub_pattern.match(out)
def test_rewrite_func_with_attr():
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("Composite", "add")
a = relay.var("a")
b = relay.var("b")
c = relay.Call(f, [a, b])
c_abs = relay.abs(c)
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = wildcard().has_attr({"Composite": "add"})(wildcard(), wildcard())
def callback(self, pre, post, node_map):
return post.args[0] + post.args[1]
out = rewrite(TestRewrite(), c_abs)
inlined_add_pattern = is_op("abs")(is_op("add")(wildcard(), wildcard()))
assert inlined_add_pattern.match(out)
def test_nested_rewrite():
class PatternCallback(DFPatternCallback):
def __init__(self, pattern):
super(PatternCallback, self).__init__()
self.pattern = pattern
def callback(self, pre, post, node_map):
return post
def gen():
x = relay.var("x")
y = relay.var("y")
y_add = relay.add(y, y)
n0 = relay.add(x, y_add)
n1 = relay.add(x, n0)
return relay.add(n1, n0)
def pattern():
a = wildcard()
b = wildcard()
n0 = is_op("add")(a, b)
n1 = is_op("add")(n0, a)
return is_op("add")(n0, n1)
out = gen()
pat = pattern()
new_out = rewrite(PatternCallback(pat), out)
assert tvm.ir.structural_equal(out, new_out)
def test_not_fuse_multi_diamond():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
out = out + conv2d
assert not diamond.match(out)
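# A DFPatternCallback pairs a pattern with a callback: rewrite() finds each
# match and replaces it with the expression returned by
# callback(pre, post, node_map), where node_map maps every sub-pattern to the
# matched expression(s). BatchnormCallback below uses node_map to pull out the
# matched operands and reassemble them into nn.batch_norm.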
class BatchnormCallback(DFPatternCallback):
def __init__(self):
super(BatchnormCallback, self).__init__()
self.x = wildcard()
self.var = wildcard()
self.mean = wildcard()
self.beta = wildcard()
self.gamma = wildcard()
self.eps = is_constant()
self.pattern = (
self.gamma * (self.x - self.mean) / is_op("sqrt")(self.var + self.eps) + self.beta
)
def callback(self, pre, post, node_map):
x = node_map[self.x][0]
var = node_map[self.var][0]
mean = node_map[self.mean][0]
beta = node_map[self.beta][0]
gamma = node_map[self.gamma][0]
eps = node_map[self.eps][0]
return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=eps.data.numpy().item())[0]
def test_fuse_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
def test_no_fuse_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
fake_BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
out = rewrite(BatchnormCallback(), fake_BN)
assert tvm.ir.structural_equal(out, fake_BN)
def test_fuse_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN2)
bn = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
bn2 = relay.op.nn.batch_norm(bn, gamma, beta, mean, var, epsilon=1e-5)[0]
assert tvm.ir.structural_equal(out, bn2)
def test_partial_fuse_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN2)
bn2 = relay.op.nn.batch_norm(BN, gamma, beta, mean, var, epsilon=1e-5)[0]
assert tvm.ir.structural_equal(out, bn2)
def test_fuse_batchnorm_commutation():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = beta + gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5))
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
BN = (gamma * (x - mean)) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
BN = gamma * ((x - mean) / relay.op.sqrt(var + relay.const(1e-5))) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
def test_quadruple_rewrite_dominator(): |
class DominatorRemovalCallback(DFPatternCallback):
def __init__(self):
super(DominatorRemovalCallback, self).__init__()
self.inp = wildcard()
self.weight = wildcard()
is_conv2d = is_op("nn.conv2d")(self.inp, self.weight)
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(
wildcard()
) | is_op("add")(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
self.pattern = dominates(is_conv2d, is_unary_elemwise, reduction)
def callback(self, pre, post, node_map):
inp = node_map[self.inp][0]
weight = node_map[self.weight][0]
return relay.op.nn.conv2d(inp, weight)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = tanh + leaky_relu
one = relay.op.nn.conv2d(inp, weight)
two = relay.op.nn.conv2d(one, weight)
three = relay.op.nn.conv2d(two, weight)
four = relay.op.nn.conv2d(three, weight)
assert tvm.ir.structural_equal(DominatorRemovalCallback().rewrite(out), four)
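# rewrite() also accepts a list of callbacks, which algebraic_simplify below
# relies on; a callback instance's own .rewrite(expr) method (used just above)
# is shorthand for rewrite(callback, expr).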
def algebraic_simplify(expr):
zero = is_expr(relay.const(0)) | is_expr(relay.const(0.0))
one = is_expr(relay.const(1)) | is_expr(relay.const(1.0))
class ElwiseNullCallback(DFPatternCallback):
def callback(self, pre, post, node_map):
return node_map[self.x][0]
class AddCallback(ElwiseNullCallback):
def __init__(self):
super(AddCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x + zero
class SubCallback(ElwiseNullCallback):
def __init__(self):
super(SubCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x - zero
class MulCallback(ElwiseNullCallback):
def __init__(self):
super(MulCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x * one
class DivCallback(ElwiseNullCallback):
def __init__(self):
super(DivCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x / one
class MulZeroCallback(ElwiseNullCallback):
def __init__(self):
super(MulZeroCallback, self).__init__()
self.x = zero
self.pattern = self.x * wildcard()
class ZeroDivCallback(ElwiseNullCallback):
def __init__(self):
super(ZeroDivCallback, self).__init__()
self.x = zero
self.pattern = self.x / wildcard()
return rewrite(
[
AddCallback(),
SubCallback(),
MulCallback(),
DivCallback(),
MulZeroCallback(),
ZeroDivCallback(),
],
expr,
)
def test_algebraic_simplify():
x = relay.Var("x")
y = relay.Var("y")
one = relay.const(1)
zero = relay.const(0)
onef = relay.const(1.0)
zerof = relay.const(0.0)
assert algebraic_simplify(x + zero) == x
assert algebraic_simplify(x + zerof) == x
assert algebraic_simplify(zero + x) == x
assert algebraic_simplify(zerof + x) == x
assert algebraic_simplify(x - zero) == x
assert algebraic_simplify(x - zerof) == x
assert algebraic_simplify(x * one) == x
assert algebraic_simplify(x * onef) == x
assert algebraic_simplify(one * x) == x
assert algebraic_simplify(onef * x) == x
assert algebraic_simplify(x * zero) == zero
assert algebraic_simplify(x * zerof) == zerof
assert algebraic_simplify(x / one) == x
assert algebraic_simplify(x / onef) == x
assert algebraic_simplify(zero / x) == zero
assert algebraic_simplify(zerof / x) == zerof
assert tvm.ir.structural_equal(
algebraic_simplify((x + zero * y) / one + (y * one) - zero / x), x + y
)
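# The partition tests below use pattern.partition(expr, attrs=None, check=None),
# which wraps every match in a relay.Function tagged with a
# "PartitionedFromPattern" attribute (the concatenated names of the matched ops)
# plus any extra attrs, replaces the match with a call to that function, and
# skips matches for which the optional check predicate returns False.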
def test_double_partition():
conv2d_p = is_op("nn.conv2d")(wildcard(), wildcard())
bias_add_p = is_op("nn.bias_add")(conv2d_p, wildcard())
relu_p = is_op("nn.relu")(bias_add_p)
x = relay.var("input")
w = relay.var("weight")
b = relay.var("bias")
w2 = relay.var("weight")
b2 = relay.var("bias")
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
conv2d2 = relay.op.nn.conv2d(relu, w2)
bias_add2 = relay.op.nn.bias_add(conv2d2, b2)
partitioned = bias_add2
for pat, label in [(relu_p, "conv_bias_relu"), (bias_add_p, "conv_bias")]:
partitioned = pat.partition(partitioned, {"Composite": label})
inpf = relay.var("input")
weightf = relay.var("weight")
biasf = relay.var("bias")
func0 = (
relay.Function(
[inpf, weightf, biasf],
relay.op.nn.relu(relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)),
)
.with_attr("Composite", "conv_bias_relu")
.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
)
inpf = relay.var("input")
weightf = relay.var("weight")
biasf = relay.var("bias")
func1 = (
relay.Function(
[inpf, weightf, biasf], relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)
)
.with_attr("Composite", "conv_bias")
.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_")
)
expected = func1(func0(x, w, b), w2, b2)
assert tvm.ir.structural_equal(partitioned, expected)
def test_partition_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
def generate_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
out = generate_diamond(inp * inp, weight * weight)
partitioned = diamond.partition(out)
i = relay.Var("input")
w = relay.Var("weight")
f = relay.Function([i, w], generate_diamond(i, w)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_"
)
assert tvm.ir.structural_equal(partitioned, f(inp * inp, weight * weight))
def test_quadruple_partition_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
"add"
)(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
def classic_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
def deeper_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
def single_branch(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
return relu + tanh
def nested_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return tanh + leaky_relu
partitioned = diamond.partition(
nested_diamond(
single_branch(deeper_diamond(classic_diamond(inp, weight), weight), weight), weight
)
)
functions = []
partition_names = [
"nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_",
"nn.conv2d_nn.relu_nn.relu_tanh_nn.leaky_relu_add_",
"nn.conv2d_nn.relu_nn.relu_tanh_add_",
"nn.conv2d_nn.relu_add_tanh_nn.leaky_relu_add_",
]
for i, f in enumerate([classic_diamond, deeper_diamond, single_branch, nested_diamond]):
inpf = relay.var("input")
weightf = relay.var("weight")
functions.append(
relay.Function([inpf, weightf], f(inpf, weightf)).with_attr(
"PartitionedFromPattern", partition_names[i]
)
)
reference = functions[3](
functions[2](functions[1](functions[0](inp, weight), weight), weight), weight
)
assert tvm.ir.structural_equal(partitioned, reference)
def get_BN(x, var, mean, beta, gamma, eps):
return gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
def test_partition_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
eps = relay.const(1e-5)
BN = get_BN(x, var, mean, beta, gamma, eps)
xf = relay.var("xf")
varf = relay.var("varf")
meanf = relay.var("meanf")
betaf = relay.var("betaf")
gammaf = relay.var("gammaf")
f = relay.Function(
[gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
partitioned = BatchnormCallback().pattern.partition(BN)
reference = f(gamma, x, mean, var, beta)
assert tvm.ir.structural_equal(partitioned, reference)
def test_partition_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
eps = relay.const(1e-5)
BN = gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + eps) + beta
xf = relay.var("xf")
varf = relay.var("varf")
meanf = relay.var("meanf")
betaf = relay.var("betaf")
gammaf = relay.var("gammaf")
f1 = relay.Function(
[gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
xf2 = relay.var("xf2")
varf2 = relay.var("varf2")
meanf2 = relay.var("meanf2")
betaf2 = relay.var("betaf2")
gammaf2 = relay.var("gammaf2")
f2 = relay.Function(
[gammaf2, xf2, meanf2, varf2, betaf2], get_BN(xf2, varf2, meanf2, betaf2, gammaf2, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
partitioned = BatchnormCallback().pattern.partition(BN2)
reference = f2(gamma, f1(gamma, x, mean, var, beta), mean, var, beta)
assert tvm.ir.structural_equal(partitioned, reference)
def test_overlappting_partitions():
x = wildcard()
gamma = wildcard()
beta = wildcard()
moving_mean = wildcard()
moving_var = wildcard()
bn_node = is_op("nn.batch_norm")(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = TupleGetItemPattern(bn_node, 0)
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
T1 = BN[0]
T2 = BN[0]
add = T1 + T2
assert tuple_get_item_node.partition(add) == add
def test_partition_overused():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))
x = relay.var("input")
w = relay.var("weight")
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
out = relu + conv2d
assert pattern.partition(out) == out
def test_partition_fuzzy_tuple():
x = relay.var("x")
y = relay.var("y")
z = x + y
tuple_pattern = is_tuple(None)
concat_pattern = is_op("concatenate")(tuple_pattern)
xp = relay.var("xp")
yp = relay.var("yp")
zp = relay.var("zp")
def create_func(args, body):
return relay.Function(args, body).with_attr("PartitionedFromPattern", "Tuple_concatenate_")
def concat(*args):
return relay.op.concatenate(relay.expr.Tuple(args), axis=0)
one = concat_pattern.partition(concat(x))
assert tvm.ir.structural_equal(one, create_func([xp], concat(xp))(x))
two = concat_pattern.partition(concat(x, y))
assert tvm.ir.structural_equal(two, create_func([xp, yp], concat(xp, yp))(x, y))
three = concat_pattern.partition(concat(x, y, z))
assert tvm.ir.structural_equal(three, create_func([xp, yp, zp], concat(xp, yp, zp))(x, y, z))
def test_partition_fuzzy_function_args():
func_pattern = FunctionPattern(None, wildcard() + wildcard())(None) + wildcard()
x = relay.var("x")
y = relay.var("y")
z = relay.var("z")
b = relay.var("b")
xp = relay.var("xp")
yp = relay.var("yp")
zp = relay.var("zp")
def create_func(call):
N = len(call.op.params)
new_params = [relay.var(str(i)) for i in range(N + 1)]
label = "add_FunctionCall_add_"
if N == 3:
label = "add_" + label
return relay.Function(
new_params, relay.Call(call.op, (new_params[0:-1])) + new_params[-1]
).with_attr("PartitionedFromPattern", label)(*([x, y, z][0:N] + [b]))
f1 = relay.Function([xp], xp + xp)(x)
one = func_pattern.partition(f1 + b)
assert tvm.ir.structural_equal(one, create_func(f1))
f2 = relay.Function([xp, yp], xp + yp)(x, y)
two = func_pattern.partition(f2 + b)
assert tvm.ir.structural_equal(two, create_func(f2))
f3 = relay.Function([xp, yp, zp], xp + yp + zp)(x, y, z)
three = func_pattern.partition(f3 + b)
assert tvm.ir.structural_equal(three, create_func(f3))
def test_partition_check():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(is_var("input"), wildcard()))
def check(pre):
return pre.args[0].attrs.data_layout == "NCHW"
x = relay.var("input")
w = relay.var("weight")
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
xf = relay.var("input")
wf = relay.var("weight")
conv2df = relay.op.nn.conv2d(xf, wf)
reluf = relay.op.nn.relu(conv2df)
func = relay.Function([xf, wf], reluf).with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
reference = func(x, w)
partitioned = pattern.partition(relu, check=check)
assert tvm.ir.structural_equal(partitioned, reference)
conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
relu = relay.op.nn.relu(conv2d)
assert relu == pattern.partition(relu, check=check)
def test_partition_check_types():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))
def check(pre):
conv = pre.args[0]
return (conv.attrs.data_layout == "NCHW") and bool(conv.checked_type.shape[0] == 1)
x = relay.var("input", shape=(1, 10, 10, 10))
w = relay.var("weight", shape=(10, 10, 3, 3))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
partitioned = pattern.partition(relu, check=check)
assert partitioned.op.attrs["PartitionedFromPattern"] == "nn.conv2d_nn.relu_"
conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
assert relu == pattern.partition(relu, check=check)
x = relay.var("input", shape=(2, 10, 10, 10))
w = relay.var("weight", shape=(10, 10, 3, 3))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
assert relu == pattern.partition(relu, check=check)
def conv_bias_relu(x, w, b):
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
return relu
def test_partition_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = conv2d.optional(lambda x: is_op("nn.bias_add")(x, wildcard()))
pattern1 = is_op("nn.relu")(bias)
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = is_op("nn.bias_add")(conv2d, wildcard())
pattern2 = bias.optional(lambda x: is_op("nn.relu")(x))
relu = conv_bias_relu(x, w, b)
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
assert pattern1.match(relu)
assert tvm.ir.structural_equal(func(x, w, b), pattern1.partition(relu))
assert pattern2.match(relu)
assert tvm.ir.structural_equal(func(x, w, b), pattern2.partition(relu))
def test_partition_function():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], is_op("nn.conv2d")(wc_x1, wc_w1))
pattern = func_pattern(wc_x, wc_w) + wc_b
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
x2 = relay.var("x2")
w2 = relay.var("w2")
b2 = relay.var("b2")
func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
"PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
)
expr2 = func2(x, w, b) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_partition_optional_function():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern0 = FunctionPattern(
[wc_x1, wc_w1], is_op("sigmoid")(is_op("nn.conv2d")(wc_x1, wc_w1))
)
func_pattern1 = FunctionPattern(
[wc_x1, wc_w1], is_op("nn.relu")(is_op("nn.conv2d")(wc_x1, wc_w1))
)
pattern = func_pattern0(wc_x, wc_w) | func_pattern1(wc_x, wc_w)
func = relay.Function([x1, w1], relay.nn.relu(relay.nn.conv2d(x1, w1)))
expr = func(x, w) + b
x2 = relay.var("x2")
w2 = relay.var("w2")
func2 = relay.Function([x2, w2], func(x2, w2)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.relu_FunctionCall_" |
)
expr2 = func2(x, w) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_rewrite_function_with_fuzzy_body():
"""Allow Rewriting a function with a fuzzy body via dominator analysis"""
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], wildcard())
pattern = func_pattern(wc_x, wc_w) + wc_b
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = pattern
def callback(self, pre, post, node_map):
return x + w
out = rewrite(TestRewrite(), expr)
assert tvm.ir.structural_equal(out, x + w + b)
def test_partition_function_with_fuzzy_body():
"""
Allow partitioning a function with a fuzzy body via dominator analysis
"""
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], wildcard())
pattern = func_pattern(wc_x, wc_w) + wc_b
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
x2 = relay.var("x2")
w2 = relay.var("w2")
b2 = relay.var("b2")
func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
"PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
)
expr2 = func2(x, w, b) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_match_match():
add_pattern = is_op("add")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
mod = tvm.IRModule({})
tvm.relay.prelude.Prelude(mod)
out = rewrite(TestRewrite(), mod["tensor_concatenate_int64"])
assert tvm.ir.structural_equal(mod["tensor_concatenate_int64"], out)
def test_partition_constant_embedding():
x = relay.var("x")
w = relay.var("w")
wc = relay.const(1)
b = relay.var("b")
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
embeded_func = relay.Function([xf, bf], conv_bias_relu(xf, wc, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
lifted_func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
relu = conv_bias_relu(x, w, b)
reluc = conv_bias_relu(x, wc, b)
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(lifted_func(x, wc, b), pattern.partition(reluc))
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(reluc, pattern.partition(reluc))
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant()), wildcard())
)
assert tvm.ir.structural_equal(relu, pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_ |
op("nn.conv2d")(wildcard(), is_expr(wc)), wildcard())
)
assert tvm.ir.structural_equal(relu, pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var() | is_constant()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant() | is_var()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
def test_rewrite_once():
class ConcatRewriter(DFPatternCallback):
def __init__(self, rewrite_once):
super().__init__(rewrite_once=rewrite_once)
self.pattern = is_op("concatenate")(None)
def callback(self, pre, post, node_map):
concat_args = post.args[0]
new_args = [concat_args[i] for i in range(len(concat_args) - 1)]
if new_args:
return relay.op.concatenate(relay.expr.Tuple(new_args), axis=0)
else:
return concat_args
x = relay.var("x")
y = relay.var("y")
z = relay.var("z")
concat = relay.op.concatenate(relay.expr.Tuple([x, y, z]), axis=0)
out = rewrite(ConcatRewriter(False), concat)
expected = relay.expr.Tuple([x])
assert tvm.ir.structural_equal(out, expected)
out = rewrite(ConcatRewriter(True), concat)
expected = relay.op.concatenate(relay.expr.Tuple([x, y]), axis=0)
assert tvm.ir.structural_equal(out, expected)
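# With rewrite_once=False the callback is re-applied until the expression stops
# changing, whittling the concatenate down to Tuple([x]); with
# rewrite_once=True only a single pass runs, leaving concatenate((x, y)).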
def test_matched_outside_but_dominated():
"""In this example the pattern matches the nn.conv2d/add/multiply flow. Even though the
add output is consumed by the sigmoid, the sigmoid itself is dominated by the multiply.
So partitioning can proceed, albeit with a duplication of the add."""
in_mod = tvm.parser.parse(
"""
def @main(%data: Tensor[(16, 16, 32, 32), float16], %weight: Tensor[(32, 16, 3, 3), float16], %bias: Tensor[(32), float32]) -> Tensor[(16, 32, 32, 32), float32] {
%0 = layout_transform(%data, src_layout="NCHW", dst_layout="NHWC");
%1 = layout_transform(%weight, src_layout="OIHW", dst_layout="OHWI");
%2 = expand_dims(%bias, axis=1, num_newaxis=2);
%3 = expand_dims(%2, axis=0);
%4 = nn.conv2d(%0, %1, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
%5 = layout_transform(%3, src_layout="NCHW", dst_layout="NHWC");
%6 = add(%4, %5);
%7 = sigmoid(%6);
%8 = multiply(%6, %7);
layout_transform(%8, src_layout="NHWC", dst_layout="NCHW")
}
"""
)
expected_mod = tvm.parser.parse(
"""
def @main(%data: Tensor[(16, 16, 32, 32), float16], %weight: Tensor[(32, 16, 3, 3), float16], %bias: Tensor[(32), float32]) -> Tensor[(16, 32, 32, 32), float32] {
%2 = expand_dims(%bias, axis=1, num_newaxis=2);
%3 = expand_dims(%2, axis=0);
%4 = layout_transform(%data, src_layout="NCHW", dst_layout="NHWC");
%5 = layout_transform(%weight, src_layout="OIHW", dst_layout="OHWI");
%6 = nn.conv2d(%4, %5, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
%7 = layout_transform(%3, src_layout="NCHW", dst_layout="NHWC");
%8 = add(%6, %7);
%9 = sigmoid(%8);
%10 = fn (%FunctionVar_0_0, %FunctionVar_0_1, %FunctionVar_0_2, %FunctionVar_0_3, PartitionedFromPattern="nn.conv2d_add_multiply_") {
%0 = nn.conv2d(%FunctionVar_0_0, %FunctionVar_0_1, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
%1 = add(%0, %FunctionVar_0_2);
multiply(%1, %FunctionVar_0_3)
};
%11 = %10(%4, %5, %7, %9);
layout_transform(%11, src_layout="NHWC", dst_layout="NCHW")
}
"""
)
pattern = is_op("multiply")(
is_op("add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard()), wildcard()
)
actual_mod = tvm.IRModule.from_expr(pattern.partition(in_mod["main"]))
actual_mod = relay.transform.InferType()(actual_mod)
tvm.ir.assert_structural_equal(actual_mod, expected_mod)
def test_partition_parallel_branch_with_same_input():
"""In this example, conv2d's two consumer(add and multiply) on two different branches are
merged into one partition, make sure that the partitioned function has no redundant parameters"""
path1 = is_op("multiply")(wildcard(), wildcard())
path2 = is_op("add")(wildcard(), wildcard())
pattern = is_op("add")(path1, path2)
i = relay.Var("input")
w = relay.Var("weight")
l = relay.Var("left")
r = relay.Var("right")
conv2d = relay.op.nn.conv2d(i, w)
branch1 = relay.multiply(l, conv2d)
branch2 = relay.add(conv2d, r)
add = relay.add(branch1, branch2)
lf = relay.Var("leftf")
mf = relay.Var("midf")
rf = relay.Var("rightf")
f = relay.Function([lf, mf, rf], (lf * mf) + (mf + rf)).with_attr(
"PartitionedFromPattern", "multiply_add_add_"
)
partitioned = pattern.partition(add)
reference = f(l, conv2d, r)
assert tvm.ir.structural_equal(partitioned, reference)
if __name__ == "__main__":
tvm.testing.main()
from tvm.relay import var, const, create_executor
from tvm.relay.op import debug
_test_debug_hit = False
def test_debug():
global _test_debug_hit
x = var("x", shape=(), dtype="int32")
_test_debug_hit = False
def did_exec(x):
global _test_debug_hit
_test_debug_hit = True
prog = debug(x, debug_func=did_exec)
result = create_executor().evaluate(prog, {x: const(1, "int32")})
assert _test_debug_hit
assert result.numpy() == 1
def test_debug_with_expr():
global _test_debug_hit
_test_debug_hit = False
x = var("x", shape=(), dtype="int32")
_test_debug_hit = False
def did_exec(x):
global _test_debug_hit
_test_debug_hit = True
prog = debug(x + x * x, debug_func=did_exec)
result = create_executor().evaluate(prog, {x: const(2, "int32")})
assert _test_debug_hit
assert result.numpy() == 6
import pytest
from tvm import TVMError
from tvm.relay.backend import Executor
def test_create_executor():
executor = Executor("aot")
assert executor.name == "aot"
def test_create_executor_with_options():
executor = Executor("aot", {"interface-api": "c"})
assert executor.name == "aot"
assert executor["interface-api"] == "c"
def test_create_executor_with_default():
executor = Executor("graph")
assert not executor["link-params"]
def test_attr_check():
executor = Executor("aot", {"interface-api": "c"})
assert "woof" not in executor
assert "interface-api" in executor
def test_create_executor_not_found():
with pytest.raises(TVMError, match='Executor "woof" is not defined'):
Executor("woof", {})
def test_create_executor_attr_not_found():
with pytest.raises(TVMError, match='Attribute "woof" is not available on this Executor'):
Executor("aot", {"woof": "bark"})
def test_create_executor_attr_type_incorrect():
with pytest.raises(
TVMError,
match='Attribute "interface-api" should have type "runtime.String"'
' but instead found "IntImm"',
):
Executor("aot", {"interface-api": True})
def test_list_executors():
assert "aot" in Executor.list_registered()
@pytest.mark.parametrize("executor", [Executor("aot").name, "aot"])
def test_list_executor_options(executor):
aot_options = Executor.list_registered_options(executor)
assert "interface-api" in aot_options
assert aot_options["interface-api"] == "runtime.String"
def test_list_executor_options_not_found():
with pytest.raises(TVMError, match='Executor "woof" is not defined'):
Executor.list_registered_options("woof")
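# Illustrative sketch (not part of the original tests): an Executor instance is
# normally passed to relay.build to pick the code-generation strategy for a
# module; "mod", "params", and the target string here are hypothetical.
#
#   lib = relay.build(
#       mod, target="llvm", params=params,
#       executor=Executor("aot", {"interface-api": "c"}),
#   )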
import tvm
from tvm import te
from tvm import relay
from tvm.relay import ExprFunctor, ExprMutator, ExprVisitor
def check_visit(expr):
try:
ef = ExprFunctor()
ef.visit(expr)
assert False
except NotImplementedError:
pass
ev = ExprVisitor()
ev.visit(expr)
em = ExprMutator()
assert em.visit(expr)
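# Illustrative sketch (not from the original file): a typical ExprVisitor
# subclass overrides one of the visit_* hooks and defers to the base class to
# keep traversing; the name _CallCounter is hypothetical.
class _CallCounter(ExprVisitor):
    def __init__(self):
        super().__init__()
        self.count = 0
    def visit_call(self, call):
        # Count every relay.Call node, then continue the recursive walk.
        self.count += 1
        super().visit_call(call)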
def test_constant():
check_visit(relay.const(1.0))
def test_tuple():
t = relay.Tuple([relay.var("x", shape=())])
check_visit(t)
def test_var():
v = relay.var("x", shape=())
check_visit(v)
def test_global():
v = relay.GlobalVar("f")
check_visit(v)
def test_function():
x = relay.var("x", shape=())
y = relay.var("y", shape=())
params = [x, y]
body = x + y
ret_type = relay.TensorType(())
type_params = []
attrs = None
f = relay.Function(params, body, ret_type, type_params, attrs)
check_visit(f)
def test_call():
x = relay.var("x", shape=())
y = relay.var("y", shape=())
call = relay.op.add(x, y)
check_visit(call)
def test_let():
x = relay.var("x", shape=())
value = relay.const(2.0)
body = x + x
l = relay.Let(x, value, body)
check_visit(l)
def test_ite():
cond = relay.var("x", shape=(), dtype="bool")
ite = relay.If(cond, cond, cond)
check_visit(ite)
def test_get_item():
t = relay.Tuple([relay.var("x", shape=())])
t = relay.TupleGetItem(t, 0)
check_visit(t)
def test_ref_create():
r = relay.expr.RefCreate(relay.const(1.0))
check_visit(r)
def test_ref_read():
ref = relay.expr.RefCreate(relay.const(1.0))
r = relay.expr.RefRead(ref)
check_visit(r)
def test_ref_write():
ref = relay.expr.RefCreate(relay.const(1.0))
r = relay.expr.RefWrite(ref, relay.const(2.0))
check_visit(r)
def test_memo():
expr = relay.const(1)
for _ in range(100):
expr = expr + expr
check_visit(expr)
def test_match():
p = relay.prelude.Prelude()
check_visit(p.mod[p.map])
def test_match_completeness():
p = relay.prelude.Prelude()
_, _, nil = p.mod.get_type("List")
for completeness in [True, False]:
match_expr = relay.adt.Match(nil, [], complete=completeness)
result_expr = ExprMutator().visit(match_expr)
assert result_expr.complete == completeness
if __name__ == "__main__":
test_constant()
test_tuple()
test_var()
test_global()
test_function()
test_call()
test_let()
test_ite()
test_ref_create()
test_ref_read()
test_ref_write()
test_memo()
test_match()
test_match_completeness()
"""Unit tests for graph partitioning.""" |
import sys
from collections |
import OrderedDict |
import numpy as np |
import pytest |
import tvm |
import tvm.testing
from tvm |
import relay, runtime
from tvm.relay.build_module |
import bind_params_by_name
from tvm.relay.op.annotation |
import compiler_begin, compiler_end
from utils.external_codegen |
import (
update_lib,
set_external_func_attr,
parametrize_external_codegen_checks,
parametrize_external_json_codegen_checks,
check_graph_executor_result,
check_vm_result,
)
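# These helpers come from the local test utils module: set_external_func_attr
# presumably marks a relay.Function for the external "ccompiler" codegen (via
# Compiler/global_symbol attributes), and the check_* helpers build, run, and
# compare results against a NumPy reference on the graph executor or VM.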
@parametrize_external_codegen_checks
def test_multi_node_subgraph(check_result):
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
w3 = relay.var("w3", shape=(10, 10))
w4 = relay.var("w4", shape=(10, 10))
w5 = relay.var("w5", shape=(10, 10))
w6 = relay.var("w6", shape=(10, 10))
w7 = relay.var("w7", shape=(10, 10))
x0 = relay.var("x0", shape=(10, 10))
w00 = relay.var("w00", shape=(10, 10))
w01 = relay.var("w01", shape=(10, 10))
w02 = relay.var("w02", shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
subgraph0 = relay.Function([x0, w00, w01, w02], q00)
subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
call0 = relay.Call(subgraph0, [x, w0, w1, w2])
x1 = relay.var("x1", shape=(10, 10))
w10 = relay.var("w10", shape=(10, 10))
w11 = relay.var("w11", shape=(10, 10))
w12 = relay.var("w12", shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
subgraph1 = relay.Function([x1, w10, w11, w12], q10)
subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
call1 = relay.Call(subgraph1, [x, w3, w4, w5])
z2 = relay.add(x, w6)
q2 = relay.subtract(z2, w7)
r = relay.concatenate((call0, call1, q2), axis=0)
f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
mod = tvm.IRModule()
mod["main"] = f
mod = relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
check_result(
mod,
map_inputs,
(30, 10),
np.concatenate(
(
((x_data + w_data[0]) - w_data[1]) * w_data[2],
((x_data + w_data[3]) - w_data[4]) * w_data[5],
x_data + w_data[6] - w_data[7],
),
axis=0,
),
)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op(check_result):
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op_int(check_result):
x = relay.var("x", shape=(8, 8), dtype="int32")
y = relay.var("y", shape=(8, 8), dtype="int32")
x0 = relay.var("x0", shape=(8, 8), dtype="int32")
y0 = relay.var("y0", shape=(8, 8), dtype="int32")
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("int32")
y_data = np.random.rand(8, 8).astype("int32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc(check_result):
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
x0 = relay.var("x0", shape=(2, 2))
y0 = relay.var("y0", shape=(2, 2))
mul = x0 * y0
mul = relay.Function([x0, y0], mul)
mul = set_external_func_attr(mul, "ccompiler", "ccompiler_2")
    call_mul = relay.Call(mul, [y, y])
x1 = relay.var("x1", shape=(2, 2))
y1 = relay.var("y1", shape=(2, 2))
add = x1 + y1
add = relay.Function([x1, y1], add)
add = set_external_func_attr(add, "ccompiler", "ccompiler_1")
call_add = relay.Call(add, [x, x])
x2 = relay.var("x2", shape=(2, 2))
y2 = relay.var("y2", shape=(2, 2))
sub = x2 - y2
sub = relay.Function([x2, y2], sub)
sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0")
call_sub = relay.Call(sub, [call_mul, call_add])
mod = tvm.IRModule.from_expr(call_sub)
x_data = np.random.rand(2, 2).astype("float32")
y_data = np.random.rand(2, 2).astype("float32")
inputs = OrderedDict(
[
("y", y_data),
("x", x_data),
]
)
check_result(mod, inputs, (2, 2), (y_data * y_data) - (x_data + x_data))
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_with_target_instance(check_result):
shape = (8, 8)
dtype = "int32"
def make_mod():
x0 = relay.var("x0", shape=shape, dtype=dtype)
y0 = relay.var("y0", shape=shape, dtype=dtype)
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.var("y", shape=shape, dtype=dtype)
call = relay.Call(f, [x, y])
return tvm.IRModule.from_expr(call)
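    # Two "ccompiler" target instances: the first carries a usable "header" attribute, while
    # the ill-formed "Bogus" header is presumably emitted into the generated C source and is
    # expected to make compilation fail, hence the pytest.raises(RuntimeError) below.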
host_target = tvm.target.Target("llvm")
generic_target = tvm.target.Target("llvm", host=host_target)
    good_extern_codegen_target = tvm.target.Target(
        # The original "header" value was truncated in this copy; it is assumed that any
        # well-formed C snippet works here, in contrast to the ill-formed "Bogus" below.
        {"kind": "ccompiler", "header": "#include <cstdint>"}, host=host_target
    )
bogus_extern_codegen_target = tvm.target.Target(
{"kind": "ccompiler", "header": "Bogus"}, host=host_target
)
mod = make_mod()
x_data = np.random.rand(*shape).astype(dtype)
y_data = np.random.rand(*shape).astype(dtype)
expected_result = x_data + y_data
inputs = {"x": x_data, "y": y_data}
check_result(
        mod,
        inputs,
        shape,
        expected_result,
        target=[generic_target, good_extern_codegen_target],
    )
with pytest.raises(RuntimeError):
check_result(
mod,
inputs,
shape,
expected_result,
target=[generic_target, bogus_extern_codegen_target],
)
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_consts(check_result):
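    # The constant y0_data is captured inside the external function rather than passed as a
    # runtime parameter; the partitioned module must still produce x_data + y0_data.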
shape = (8, 8)
dtype = "float32"
x = relay.var("x", shape=shape)
y0_data = np.random.uniform(0, 1, shape).astype(dtype)
x0 = relay.var("x0", shape=shape)
y0_const = relay.const(y0_data, dtype)
z = x0 + y0_const
f = relay.Function([x0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(*shape).astype(dtype)
inputs = {"x": x_data}
expected_result = x_data + y0_data
check_result(mod, inputs, shape, expected_result, target="llvm")
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_padding(check_result):
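    # Builds the same strided/padded conv2d twice: once as a plain Relay module used as the
    # reference, and once wrapped in a function offloaded to the DNNL codegen; the two
    # results are compared with tol=1e-5.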
dtype = "float32"
ishape = (1, 1, 99, 12)
w1shape = (54, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1))
f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data1, weight1])
mod = tvm.IRModule.from_expr(call)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w_data
)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 54, 50, 6), ref_res.numpy(), tol=1e-5
)
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl(check_result):
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1, weight1, weight2], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0, weight0, weight0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w_data, w_data
)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
)
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_const(check_result):
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.const(w_data, dtype=dtype)
weight2 = relay.const(w_data, dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(i_data)
check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5)
def test_load_params_with_constants_in_ext_codegen():
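    # "y" is bound as a constant before partitioning, so after PartitionGraph and relay.build
    # the graph module should carry no leftover runtime params; loading the resulting (empty)
    # param dict must still succeed.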
y_in = np.ones((1,)).astype("float32")
params = {"y": y_in}
mod = tvm.IRModule()
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1,))
xcb = compiler_begin(x, "ccompiler")
ycb = compiler_begin(y, "ccompiler")
z = relay.add(xcb, ycb)
zce = compiler_end(z, "ccompiler")
mod["main"] = relay.Function([x, y], zce)
mod["main"] = bind_params_by_name(mod["main"], params)
mod = relay.transform.PartitionGraph()(mod)
graph_module = relay.build(mod, target="llvm", params=params)
assert len(graph_module.get_params()) == 0
lib = update_lib(graph_module.get_lib())
rt_mod = tvm.contrib.graph_executor.create(graph_module.get_graph_json(), lib, tvm.cpu(0))
rt_mod.load_params(runtime.save_param_dict(graph_module.get_params()))
if __name__ == "__m |
ain__":
tvm.testing.main() |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" test bind function."""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm import TVMError
def test_bind_params():
x = relay.var("x")
y = relay.var("y")
z = relay.add(x, y)
f = relay.Function([x, y], z)
fbinded = relay.bind(f, {x: relay.const(1, "float32")})
fexpected = relay.Function([y], relay.add(relay.const(1, "float32"), y))
assert tvm.ir.structural_equal(fbinded, fexpected)
zbinded = relay.bind(z, {y: x})
zexpected = relay.add(x, x)
assert tvm.ir.structural_equal(zbinded, zexpected)
def test_bind_duplicated_params():
a = relay.var("a", shape=(1,))
aa = relay.var("a", shape=(1,))
s = a + aa
func = relay.Function([a, aa], s)
with pytest.raises(TVMError):
relay.build_module.bind_params_by_name(func, {"a": [1.0]})
if __name__ == "__main__":
test_bind_params()
test_bind_duplicated_params()
"""Tests for module functionality."""
import tvm
from tvm import te
from tvm import relay
from tvm.relay.prelude import Prelude
def constructor_list(p):
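    # p.mod.get_type(name) returns the type handle followed by its constructors; [1:] keeps
    # only the constructors of List, Option, nat, and Tree (adt_list below keeps the handles).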
list_ctors = p.mod.get_type("List")
optional_ctors = p.mod.get_type("Option")
nat_ctors = p.mod.get_type("nat")
rose_ctors = p.mod.get_type("Tree")
return list_ctors[1:] + optional_ctors[1:] + nat_ctors[1:] + rose_ctors[1:]
def adt_list(p):
list_ctors = p.mod.get_type("List")
optional_ctors = p.mod.get_type("Option")
nat_ctors = p.mod.get_type("nat")
rose_ctors = p.mod.get_type("Tree")
return list_ctors[:1] + optional_ctors[:1] + nat_ctors[:1] + rose_ctors[:1]
def test_constructor_tag_round_trip():
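    # Two independently constructed preludes should assign the same constructor tags, so a
    # tag taken from the first module looks up the matching constructor (same name_hint) in
    # the second.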
mod1 = tvm.IRModule()
p1 = Prelude(mod1)
p1.mod.import_from_std("nat.rly")
mod2 = tvm.IRModule()
p2 = Prelude(mod2)
p2.mod.import_from_std("nat.rly")
ctors1 = constructor_list(p1)
ctors2 = constructor_list(p2)
for i in range(len(ctors1)):
tag = ctors1[i].tag
ctor = mod2.get_constructor(tag)
assert ctor == ctors2[i]
assert ctor.name_hint == ctors1[i].name_hint
def test_constructor_tag_differences():
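    # Within a single ADT, consecutive constructors get consecutive tags, and a tag is never
    # simply the constructor's positional index.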
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
adts = adt_list(p)
for adt in adts:
data = mod[adt]
for i in range(len(data.constructors) - 1):
ctor1 = data.constructors[i]
ctor2 = data.constructors[i + 1]
assert ctor2.tag - ctor1.tag == 1
assert ctor1.tag - i != 0
            assert ctor2.tag - (i + 1) != 0
"""Tests for Relay IR node construction and JSON round-tripping."""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.tir.expr import *
from tvm.relay import op
import numpy as np
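# Helper used by most tests below: round-trip a node through save_json/load_json and check
# structural equality (free variables are allowed to map across the two nodes).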
def check_json_roundtrip(node):
json_str = tvm.ir.save_json(node)
back = tvm.ir.load_json(json_str)
assert tvm.ir.structural_equal(back, node, map_free_vars=True)
def test_span():
span = relay.Span(None, 1, 2, 3, 4)
assert span.source_name == None
assert span.line == 1
assert span.end_line == 2
assert span.column == 3
assert span.end_column == 4
assert span.same_as(span)
assert span == span
assert isinstance(span, relay.base.Span)
str(span)
back = tvm.ir.load_json(tvm.ir.save_json(span))
assert back.source_name == span.source_name
assert back.line == span.line
assert back.end_line == span.end_line
assert back.column == span.column
assert back.end_column == span.end_column
def test_constant():
arr = tvm.nd.array(10)
const = relay.Constant(arr)
assert const.data == arr
assert const.span == None
str(const)
check_json_roundtrip(const)
def test_tuple():
fields = tvm.runtime.convert([])
tup = relay.Tuple(fields)
assert tup.fields == fields
assert tup.span == None
str(tup)
check_json_roundtrip(tup)
def test_local_var():
name_hint = "s"
lv = relay.Var(name_hint)
assert lv.name_hint == name_hint
assert lv.type_annotation is None
str(lv)
check_json_roundtrip(lv)
t1 = relay.ty.TensorType((), "float")
lv = relay.Var(name_hint, t1)
assert lv.name_hint == name_hint
assert lv.type_annotation == t1
def test_global_var():
name_hint = "g"
gv = relay.GlobalVar(name_hint)
    assert gv.name_hint == name_hint
str(gv)
check_json_roundtrip(gv)
def test_function():
param_names = ["a", "b", "c", "d"]
params = tvm.runtime.convert([relay.Var(n) for n in param_names])
ret_type = relay.TupleType(tvm.runtime.convert([]))
body = relay.Tuple(tvm.runtime.convert([]))
type_params = tvm.runtime.convert([])
fn = relay.Function(params, body, ret_type, type_params)
fn = fn.with_attr("test_attribute |
", "value")
fn = fn.with_attr("test_attribute1", "value1")
assert fn.params == params
assert fn.body == body
assert fn.type_params == type_params
assert fn.span == None
assert fn.attrs["test_attribute"] == "value"
assert fn.attrs["test_attribute1"] == "value1"
str(fn)
check_json_roundtrip(fn)
def test_function_attrs():
param_names = ["a", "b", "c", "d"]
params = tvm.runtime.convert([relay.var(n, shape=(5, 2)) for n in param_names])
ret_type = relay.TupleType(tvm.runtime.convert([]))
body = relay.Tuple(tvm.runtime.convert([]))
type_params = tvm.runtime.convert([])
fn = relay.Function(params, body, ret_type, type_params)
model_params = {}
for param in params[:1]:
cty = param.type_annotation
tensor = np.random.rand(*[int(sh) for sh in cty.shape]).astype(cty.dtype)
model_params[param] = relay.Constant(tvm.nd.array(tensor))
fn = fn.with_attr("__params__", model_params)
assert fn.params == params
assert fn.body == body
assert fn.type_params == type_params
assert fn.span == None
str(fn)
check_json_roundtrip(fn)
json_str = tvm.ir.save_json(fn)
fn_after = tvm.ir.load_json(json_str)
model_params_after = fn_after.attrs["__params__"]
after_keys = [item[0] for item in model_params_after.items()]
for key1, key2 in zip(model_params, after_keys):
assert key1.name_hint == key2.name_hint
p1 = model_params[key1]
p2 = model_params_after[key2]
np.testing.assert_allclose(p1.data.numpy(), p2.data.numpy())
def test_call():
op = relay.Var("f")
arg_names = ["a", "b", "c", "d"]
args = tvm.runtime.convert([relay.Var(n) for n in arg_names])
call = relay.Call(op, args, None, None)
assert call.op == op
assert call.args == args
assert call.span == None
str(call)
check_json_roundtrip(call)
def test_let():
lv = relay.Var("x")
ty = None
arr = tvm.nd.array(10)
value = relay.Constant(arr)
    let = relay.Let(lv, value, lv)
assert let.var == lv
assert let.value == value
assert let.body == lv
assert let.span == None
str(let)
check_json_roundtrip(let)
def test_if():
cond = relay.Var("cond")
left = relay.Var("left")
right = relay.Var("right")
ife = relay.If(cond, left, right)
assert ife.cond == cond
assert ife.true_branch == left
assert ife.false_branch == right
assert ife.span == None
str(ife)
check_json_roundtrip(ife)
def test_tuple_get_item():
tup = relay.Var("tuple")
get = relay.TupleGetItem(tup, 1)
assert get.tuple_value == tup
assert get.index == 1
str(get)
check_json_roundtrip(get)
def test_op():
add = op.op.get("add")
check_json_roundtrip(add)
def test_conv2d_attrs():
data = relay.var("data", shape=(1, 3, 224, 224))
param = relay.var("param", shape=(64, 3, 7, 7))
out = op.nn.conv2d(data, param, strides=(2, 2), padding=(3, 3), channels=64, kernel_size=(7, 7))
check_json_roundtrip(out)
if __name__ == "__main__":
test_span()
test_constant()
test_tuple()
test_local_var()
test_global_var()
test_function()
test_function_attrs()
test_call()
test_let()
test_if()
test_tuple_get_item()
test_op()
    test_conv2d_attrs()
import tvm
from tvm import relay
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.op import op as _op
def test_op_attr():
log_op = relay.op.get("log")
@tvm.ir.register_op_attr("exp", "ftest")
def test(x):
return x + 1
assert log_op.num_inputs == 1
assert log_op.get_attr("ftest") is None
assert relay.op.get("exp").get_attr("ftest")(1) == 2
def test_op_reset_attr():
"""Tests reset_attr functionality."""
def add1(x):
return x + 1
def add2(x):
return x + 2
tvm.ir.register_op_attr("exp", "fadd1", add1)
tvm.ir.register_op_attr("log", "fadd1", add1)
tvm.ir.register_op_attr("log", "fadd2", add2)
log_op = relay.op.get("log")
log_op.reset_attr("fadd1")
assert log_op.get_attr("fadd1") is None
assert relay.op.get("exp").get_attr("fadd1")(1) == 2
assert relay.op.get("log").get_attr("fadd2")(1) == 3
def test_op_temp_attr():
"""Tests reset_attr functionality."""
def add1(x):
return x + 1
def add2(x):
return x + 2
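    # TempOpAttr overrides "ftest" only inside the with-block and restores the previously
    # registered add1 afterwards, hence the value 3 inside and 2 outside.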
tvm.ir.register_op_attr("sqrt", "ftest", add1)
with TempOpAttr("sqrt", "ftest", add2):
assert relay.op.get("sqrt").get_attr("ftest")(1) == 3
assert relay.op.get("sqrt").get_attr("ftest")(1) == 2
def test_op_level1():
x = relay.Var("x")
for op_name in ["log", "exp", "sqrt", "rsqrt", "tanh"]:
y = getattr(relay, op_name)(x)
assert y.op.name == op_name
assert y.op.support_level == 1
assert y.args[0] == x
def test_op_level3():
x = relay.Var("x")
for op_name in ["ceil", "floor", "trunc", "round", "abs", "negative"]:
y = getattr(relay, op_name)(x)
assert y.op.name == op_name
assert y.op.support_level == 3
assert y.args[0] == x
def test_op_register():
"""Tests register_op functionality."""
op_name = "custom_op"
_op.register(op_name, r"code(Add two tensor with inner broadcasting.)code")
_op.get(op_name).set_num_inputs(2)
_op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).add_argument("data_1", "Tensor", "The input data tensor.")
_op.get(op_name).add_type_rel("Identity")
_op.get(op_name).set_support_level(1)
_op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
_op.register_stateful(op_name, False)
assert _op.get(op_name).name == op_name
assert _op.get(op_name).num_inputs == 2
assert _op.get(op_name).get_attr("TOpPattern") == _op.OpPattern.ELEMWISE
assert _op.get(op_name).get_attr("TOpIsStateful") == False
if __name__ == "__main__":
test_op_attr()
test_op_reset_attr()
test_op_temp_attr()
test_op_level1()
test_op_level3()
    test_op_register()
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
import tvm.relay.testing
from numpy import isclose
from typing import Union
# Version header prepended to every parsed module (the original literal was truncated in this
# copy; this value is a reconstruction of the standard Relay 0.0.5 header).
SEMVER = '#[version = "0.0.5"]\n'
BINARY_OPS = {
"*": relay.multiply,
"/": relay.divide,
"+": relay.add,
"-": relay.subtract,
"<": relay.less,
">": relay.greater,
"<=": relay.less_equal,
">=": relay.greater_equal,
"==": relay.equal,
"!=": relay.not_equal,
}
TYPES = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"bool",
"int8x4",
"uint1x4",
"float16x4",
}
LIST_DEFN = """
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
def assert_graph_equal(lhs, rhs):
tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars=True)
def graph_equal(lhs, rhs):
return tvm.ir.structural_equal(lhs, rhs, map_free_vars=True)
def roundtrip_expr(expr):
text = tvm.relay.Expr.astext(expr, show_meta_data=False)
x = tvm.parser.parse_expr(text)
assert_graph_equal(x, expr)
def roundtrip(expr):
x = tvm.parser.fromtext(expr.astext())
assert_graph_equal(x, expr)
def parse_text(code):
expr = tvm.parser.parse_expr(code)
roundtrip_expr(expr)
return expr
def parses_as(code, expr):
parsed = parse_text(code)
result = graph_equal(parsed, expr)
return result
def parse_module(code):
mod = tvm.parser.parse(SEMVER + code)
roundtrip(mod)
return mod
def assert_parses_as(code, expr):
parsed = parse_text(code)
assert_graph_equal(parsed, expr)
def assert_parse_module_as(code, mod):
mod = tvm.relay.transform.InferType()(mod)
parsed = parse_module(code)
assert_graph_equal(parsed, mod)
def get_scalar(x):
return x.data.numpy().item()
int32 = relay.scalar_type("int32")
_ = relay.Var("_")
X = relay.Var("x")
Y = relay.Var("y")
X_ANNO = relay.Var("x", int32)
Y_ANNO = relay.Var("y", int32)
UNIT = relay.Tuple([])
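# Shared pattern variables: parsed programs are compared structurally with map_free_vars=True,
# so these free Vars stand in for whatever fresh variables the parser creates.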
def test_comments():
assert_parses_as(
"""
()
""",
UNIT,
)
assert_parses_as(
"""
        /* This is a block comment!
This is still a block comment!
*/
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
/*Block comment is recursive!*/
*/
()
""",
UNIT,
)
def test_int_literal():
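    # Unsuffixed integer literals default to int32, widening to int64 only when the value does
    # not fit; explicit i/i16/i32/i64 suffixes fix the dtype, and out-of-range suffixed
    # literals raise a DiagnosticError.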
assert isinstance(parse_text("1"), relay.Constant)
assert isinstance(parse_text("1").data, tvm.nd.NDArray)
assert get_scalar(parse_text("1")) == 1
assert get_scalar(parse_text("10")) == 10
assert get_scalar(parse_text("0")) == 0
assert get_scalar(parse_text("-100")) == -100
assert get_scalar(parse_text("-05")) == -5
assert get_scalar(parse_text("9223372036854775807")) == 9223372036854775807
assert get_scalar(parse_text("-42i")) == -42
assert get_scalar(parse_text("-42i16")) == -42
assert get_scalar(parse_text("-42i32")) == -42
assert get_scalar(parse_text("-42i64")) == -42
assert_parses_as("-42i16", relay.const(-42, "int16"))
assert_parses_as("-42i32", relay.const(-42, "int32"))
assert_parses_as("-42i", relay.const(-42, "int32"))
assert_parses_as("-42", relay.const(-42, "int32"))
assert_parses_as("-42i64", relay.const(-42, "int64"))
assert_parses_as("2147483647", relay.const(2147483647, "int32"))
assert_parses_as("2147483648", relay.const(2147483648, "int64"))
with pytest.raises(tvm.error.DiagnosticError):
parse_text("2147483648i32")
with pytest.raises(tvm.error.DiagnosticError):
parse_text("32768i16")
def test_float_literal():
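    # The f/f16/f32/f64 suffixes select the float dtype ("f" alone means float32); literals
    # that overflow the chosen dtype raise a DiagnosticError.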
assert get_scalar(parse_text("1.0f")) == 1.0
assert isclose(get_scalar(parse_text("1.56667f")), 1.56667)
assert get_scalar(parse_text("0.0f")) == 0.0
assert get_scalar(parse_text("-10.0f")) == -10.0
assert isclose(get_scalar(parse_text("1e-1f")), 1e-1)
assert get_scalar(parse_text("1e+1f")) == 1e1
assert isclose(get_scalar(parse_text("1E-1f")), 1e-1)
assert get_scalar(parse_text("1E+1f")) == 1e1
assert isclose(get_scalar(parse_text("1.0e-1f")), 1. |
0e-1)
assert get_scalar(parse_text("1.0e+1f")) == 1.0e1
assert isclose(get_scalar(parse_text("1.0E-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0E+1f")) == 1.0e1
assert get_scalar(parse_text("3f16")) == 3.0
assert get_scalar(parse_text("3f32")) == 3.0
assert_parses_as("3f16", relay.const(3.0, "float16"))
assert_parses_as("3f32", relay.const(3.0, "float32"))
assert_parses_as("3f", relay.const(3.0, "float32"))
assert_parses_as("3f64", relay.const(3.0, "float64"))
with pytest.raises(tvm.error.DiagnosticError):
parse_text("3.40283e+38f32")
with pytest.raises(tvm.error.DiagnosticError):
parse_text("65505f16")
def test_bool_literal():
assert get_scalar(parse_text("True")) == True
assert get_scalar(parse_text("False")) == False
assert_parses_as("True", relay.const(True, "bool"))
def test_negative():
assert get_scalar(parse_text("--10")) == 10
assert get_scalar(parse_text("---10")) == -10
def test_bin_op():
for bin_op in BINARY_OPS.keys():
assert_parses_as(
"1 {} 1".format(bin_op), BINARY_OPS.get(bin_op)(relay.const(1), relay.const(1))
)
def test_parens():
assert graph_equal(parse_text("1 * 1 + 1"), parse_text("(1 * 1) + 1"))
assert not graph_equal(parse_text("1 * 1 + 1"), parse_text("1 * (1 + 1)"))
def test_op_assoc():
assert graph_equal(parse_text("1 * 1 + 1 < 1 == 1"), parse_text("(((1 * 1) + 1) < 1) == 1"))
assert graph_equal(parse_text("1 == 1 < 1 + 1 * 1"), parse_text("1 == (1 < (1 + (1 * 1)))"))
def test_vars():
var = parse_text("let %foo = (); %foo")
assert isinstance(var.body, relay.Var)
assert var.body.name_hint == "foo"
global_var = parse_text("@foo")
assert isinstance(global_var, relay.GlobalVar)
assert global_var.name_hint == "foo"
op = parse_text("add")
assert isinstance(op, tvm.ir.Op)
assert op.name == "add"
op = parse_text("nn.global_avg_pool2d")
    assert isinstance(op, tvm.ir.Op)
assert op.name == "nn.global_avg_pool2d"
def test_meta_ref():
with pytest.raises(tvm.error.DiagnosticError):
meta_op = parse_text("meta[type_key][1337]")
assert meta_op.attrs.node_type_key == "type_key"
assert meta_op.attrs.node_index == 1337
def test_let():
assert_parses_as("let %x = 1; ()", relay.Let(X, relay.const(1), UNIT))
assert_parses_as(
"""
let %x = 1;
let %y = 2;
()
""",
relay.Let(X, relay.const(1), relay.Let(Y, relay.const(2), UNIT)),
)
def test_seq():
assert_parses_as("(); ()", relay.Let(_, UNIT, UNIT))
assert_parses_as("let %_ = 1; ()", relay.Let(X, relay.const(1), UNIT))
def test_graph():
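    # Graph bindings (%0, %1) are inlined references rather than let-bindings, so the program
    # parses to a plain Tuple that reuses the bound expressions.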
code = "%0 = (); %1 = 1; (%0, %0, %1)"
assert_parses_as(code, relay.Tuple([UNIT, UNIT, relay.const(1)]))
def test_graph_single():
assert_parses_as("%1 = (); %1", relay.Tuple([]))
def test_let_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let @x = 1; ()")
def test_let_op():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let x = 1; ()")
def test_tuple():
assert_parses_as("()", relay.Tuple([]))
assert_parses_as("(0,)", relay.Tuple([relay.const(0)]))
assert_parses_as("(0, 1)", relay.Tuple([relay.const(0), relay.const(1)]))
assert_parses_as("(0, 1, 2)", relay.Tuple([relay.const(0), relay.const(1), relay.const(2)]))
def test_tuple_proj():
x = relay.var("x", shape=())
assert_parses_as(
"free_var %x: float32; %x((%x,).0, %x)",
relay.Call(x, [relay.TupleGetItem(relay.Tuple([x]), 0), x]),
)
def test_func():
assert_parses_as("fn () { 0 }", relay.Function([], relay.const(0), None, []))
assert_parses_as("fn (%x) { %x }", relay.Function([X], X, None, []))
assert_parses_as("fn (%x, %y) { %x + %y }", relay.Function([X, Y], relay.add(X, Y), None, []))
assert_parses_as("fn (%x: int32) -> int32 { %x }", relay.Function([X_ANNO], X_ANNO, int32, [] |
))
def test_defn():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
%x
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_recursive_call():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
@id(%x)
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_ifelse():
assert_parses_as(
"""
if (True) {
0
} else {
1
}
""",
relay.If(relay.const(True), relay.const(0), relay.const(1)),
)
def test_ifelse_scope():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
if (True) {
let %x = ();
()
} else {
%x
}
"""
)
def test_ref():
program = """
def @main(%x: float32) {
%0 = ref(%x);
ref_write(%0, 1f);
ref_read(%0)
}
"""
tvm.parser.parse(program)
def test_call():
id_func = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
10 * %id(10)
""",
relay.Let(
id_func,
relay.Function([X], X, None, []),
relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)])),
),
)
constant = relay.Var("constant")
assert_parses_as(
"""
let %constant = fn () { 0 };
%constant()
""",
relay.Let(
constant,
relay.Function([], relay.const(0), None, []),
relay.Call(constant, [], None, None),
),
)
id_var = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
%id(1)
""",
relay.Let(
id_var,
relay.Function([X], X, None, []),
relay.Call(id_var, [relay.const(1)], None, None),
),
    )
multiply = relay.Var("multiply")
assert_parses_as(
"""
let %multiply = fn (%x, %y) { %x * %y };
%multiply(0, 0)
""",
relay.Let(
multiply,
relay.Function([X, Y], relay.multiply(X, Y), None, []),
relay.Call(multiply, [relay.const(0), relay.const(0)], None, None),
),
)
assert_parses_as(
"""
(fn (%x) { %x })(0)
""",
relay.Call(relay.Function([X], X, None, []), [relay.const(0)], None, None),
)
curried_mult = relay.Var("curried_mult")
assert_parses_as(
"""
let %curried_mult =
fn (%x) {
fn (%y) {
%x * %y
}
};
%curried_mult(0);
%curried_mult(0)(0)
""",
relay.Let(
curried_mult,
relay.Function([X], relay.Function([Y], relay.multiply(X, Y), None, []), None, []),
relay.Let(
_,
relay.Call(curried_mult, [relay.const(0)], None, None),
relay.Call(
relay.Call(curried_mult, [relay.const(0)], None, None),
[relay.const(0)],
None,
None,
),
),
),
)
assert_parses_as("abs(1)", relay.Call(relay.op.get("abs"), [relay.const(1)], None, None))
def test_incomplete_type():
assert_parses_as("let %_ : _ = (); ()", relay.Let(_, UNIT, UNIT))
def test_builtin_types():
for builtin_type in TYPES:
parse_text("let %_ : {} = (); ()".format(builtin_type))
def test_tensor_type():
assert_parses_as(
"let %_ : Tensor[(), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1,), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1, 1), |