assert_parses_as(
"let %_ : Tensor[(1, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1, 1), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(?, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((tvm.tir.Any(), 1), "float32")), UNIT, UNIT),
)
def test_function_type():
assert_parses_as(
"""
let %_: fn () -> int32 = fn () -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([], int32, [], [])),
relay.Function([], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32], int32, [], [])),
relay.Function([relay.Var("x", int32)], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32, int32], int32, [], [])),
relay.Function(
[relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, []
),
UNIT,
),
)
def test_tuple_type():
assert_parses_as(
"""
let %_: () = (); ()
""",
relay.Let(relay.Var("_", relay.TupleType([])), UNIT, UNIT),
)
assert_parses_as(
"""
let %_: (int32,) = (0,); ()
""",
relay.Let(relay.Var("_", relay.TupleType([int32])), relay.Tuple([relay.const(0)]), UNIT),
)
assert_parses_as(
"""
let %_: (int32, int32) = (0, 1); ()
""",
relay.Let(
relay.Var("_", relay.TupleType([int32, int32])),
relay.Tuple([relay.const(0), relay.const(1)]),
UNIT,
),
)
def test_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [relay.Constructor("Nil", [], glob_typ_var)])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { Nil }
""",
mod,
)
def test_adt_any():
code = """
type my_dtype {
my_cons(Tensor[(?, 1), uint16]),
}
"""
mod = parse_module(code)
items = mod.type_definitions.items()
global_type_var, type_data = items[0]
assert global_type_var.name_hint == "my_dtype"
ctors = type_data.constructors
assert len(ctors) == 1
my_cons = ctors[0]
assert my_cons.name_hint == "my_cons"
ty_shape = my_cons.inputs[0].shape
assert isinstance(ty_shape[0], tvm.tir.Any)
assert ty_shape[1] == 1
def test_empty_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { }
""",
mod,
)
def test_multiple_cons_defn():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
prog = relay.TypeData(
list_var,
[typ_var],
[
relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var),
relay.Constructor("Nil", [], list_var),
],
)
mod[list_var] = prog
assert_parse_module_as(LIST_DEFN, mod)
def test_multiple_type_param_defn():
glob_typ_var = relay.GlobalTypeVar("Either")
typ_var_a = relay.TypeVar("A")
typ_var_b = relay.TypeVar("B")
prog = relay.TypeData(
glob_typ_var,
[typ_var_a, typ_var_b],
[
relay.Constructor("Left", [typ_var_a], glob_typ_var),
relay.Constructor("Right", [typ_var_b], glob_typ_var),
],
)
mod = tvm.IRModule()
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Either[A, B] {
Left(A),
Right(B),
}
""",
mod,
)
def test_match():
match_keywords = [("match", True), ("match?", False)]
for (match_keyword, is_complete) in match_keywords:
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
length_var = relay.GlobalVar("length")
typ_var = relay.TypeVar("A")
input_type = list_var(typ_var)
input_var = relay.Var("xs", input_type)
rest_var = relay.Var("rest")
cons_case = relay.Let(
relay.var("", type_annotation=None),
UNIT,
relay.add(relay.const(1), relay.Call(length_var, [rest_var])),
)
body = relay.Match(
input_var,
[
relay.Clause(
relay.PatternConstructor(
cons_constructor, [relay.PatternWildcard(), relay.PatternVar(rest_var)]
),
cons_case,
),
relay.Clause(relay.PatternConstructor(nil_constructor, []), relay.const(0)),
],
complete=is_complete,
)
length_func = relay.Function([input_var], body, int32, [typ_var])
mod[length_var] = length_func
assert_parse_module_as(
"""
%s
def @length[A](%%xs: List[A]) -> int32 {
%s (%%xs) {
Cons(_, %%rest : List[A]) => {
();
1 + @length(%%rest)
},
Nil => 0,
}
}
"""
% (LIST_DEFN, match_keyword),
mod,
)
def test_adt_cons_expr():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
make_singleton_var = relay.GlobalVar("make_singleton")
input_var = relay.Var("x", int32)
make_singleton_func = relay.Function(
[input_var], cons_constructor(input_var, nil_constructor()), list_var(int32)
)
mod[make_singleton_var] = make_singleton_func
assert_parse_module_as(
"""
%s
def @make_singleton(%%x: int32) -> List[int32] {
Cons(%%x, Nil)
}
"""
% LIST_DEFN,
mod,
)
def test_duplicate_adt_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_module(
"""
%s
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
% LIST_DEFN
)
def test_duplicate_adt_cons():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Haha { Lmao }
"""
)
def test_duplicate_adt_cons_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Lmao { Ayy }
"""
)
def test_duplicate_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
def @id[A](%x: A) -> A { x }
def @id[A](%x: A) -> A { x }
"""
)
def test_extern_adt_defn():
mod = tvm.IRModule()
extern_var = relay.GlobalTypeVar("T")
typ_var = relay.TypeVar("A")
extern_def = relay.TypeData(extern_var, [typ_var], [])
mod[extern_var] = extern_def
assert_parse_module_as(
"""
extern type T[A]
""",
mod,
)
def test_import_grad():
mod = tvm.IRModule()
mod.import_from_std("gradient.rly")
def test_mlp():
mod, _ = relay.testing.mlp.get_workload(1)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
def inline_params(mod, params):
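"""Bind the given params into @main as constants and rebuild the function over the remaining free vars."""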
main_fn = mod["main"]
str_to_var = {}
for param in main_fn.params:
str_to_var[param.name_hint] = param
bind_map = {}
for param in params:
bind_map[str_to_var[param]] = relay.const(params[param])
body = relay.bind(main_fn.body, bind_map)
main_fn = relay.Function(relay.analysis.free_vars(body), body)
mod._add("main", main_fn, True)
return mod
def test_mlp_inlined_params():
mod, params = relay.testing.mlp.get_workload(1)
mod = inline_params(mod, params)
mod = relay.transform.InferType()(mod)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
def test_tuple_return_value():
program = """
type Box[T] {
constructor(T)
}
def @example() {
%0 = ();
%1 = constructor(%0);
%2 = constructor(0f);
(%1, %2,)
}
"""
parse_module(program)
def test_parse_if_in_binding():
program = """
def @example(%b: bool) {
%0 = if (%b) {
1
} else {
0
};
%0
}
"""
parse_module(program)
def test_op_string_attr():
call = parse_text(
"""
free_var %x: Tensor[(1, 32, 32, 3), float32];
free_var %y: Tensor[(1, 1, 3, 3), float32];
nn.conv2d(%x, %y, data_layout="NHWC", kernel_layout="HWIO")
"""
)
assert isinstance(call.op, tvm.ir.Op)
assert call.op.name == "nn.conv2d"
assert call.attrs.data_layout == "NHWC"
assert call.attrs.kernel_layout == "HWIO"
def test_load_prelude():
mod = tvm.IRModule()
mod.import_from_std("prelude.rly")
tvm.parser.parse(mod.astext())
def test_call_attrs():
def get_func(shape, dtype):
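# Build a small dense -> relu -> add(1) subgraph used as the callee in the test below.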
x0 = relay.var("data", shape=shape, dtype=dtype)
w0 = relay.var("weight", shape=shape, dtype=dtype)
a = relay.nn.dense(x0, w0)
b = relay.nn.relu(a)
d = relay.add(b, relay.const(1.0, dtype=dtype))
return relay.Function([x0, w0], d)
shape = (2, 4)
dtype = "float32"
sub_func = get_func(shape, dtype)
p0 = relay.var("p0", shape=shape, dtype=dtype)
p1 = relay.var("p1", shape=shape, dtype=dtype)
attr = tvm.ir.make_node("attrs.TestAttrs", name="func_call_attrs")
call = relay.Call(sub_func, [p0, p1], attrs=attr)
func = relay.Function([p0, p1], call)
mod = tvm.IRModule()
mod["main"] = func
mod = tvm.relay.transform.InferType()(mod)
program = """
def @main(%p0: Tensor[(2, 4), float32], %p1: Tensor[(2, 4), float32]) {
%2 = fn (%data: Tensor[(2, 4), float32], %weight: Tensor[(2, 4), float32]) {
%0 = nn.dense(%data, %weight, units=None);
%1 = nn.relu(%0);
add(%1, 1f)
};
%2(%p0, %p1, name="func_call_attrs", attrs_type_key="attrs.TestAttrs")
}
"""
parsed = parse_module(program)
assert_graph_equal(parsed, mod)
def test_tokenize_inf():
x = relay.var("x", shape=(3, 4), dtype="float32")
y = relay.clip(x, -np.inf, np.inf)
f = relay.Function([x], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.AnnotateSpans()(mod)
def test_func_attrs():
attrs = tvm.ir.make_node("DictAttrs", **{"Primitive": 1, "relay.reshape_only": 1})
x = relay.var("x", shape=(2, 3))
func = relay.Function([x], relay.reshape(x, (-1,)), attrs=attrs)
assert_parses_as(func.astext(), func)
def test_init_module_and_metatable():
init_metatable = {"relay.Constant": [relay.const(np.random.rand(2, 3), dtype="float32")]}
init_module = tvm.parser.fromtext(
SEMVER
+ """
def @f(%y : Tensor[(2, 3), float32]) -> Tensor[(2, 3), float32] {
negative(%y)
}
""",
)
mod = tvm.parser.parse(
SEMVER
+ """
def @main(%x: Tensor[(2, 3), float32]) {
add(@f(%x), meta[relay.Constant][0])
}
""",
"from_string",
init_module,
init_metatable,
)
roundtrip(mod)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing import run_opt_pass
def consistent_equal(x, y, map_free_vars=False):
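"""Check structural_equal(x, y) in both argument orders and verify it agrees with structural_hash; raise ValueError on any inconsistency, otherwise return the equality result."""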
struct_equal0 = tvm.ir.structural_equal(x, y, map_free_vars)
struct_equal1 = tvm.ir.structural_equal(y, x, map_free_vars)
xhash = tvm.ir.structural_hash(x, map_free_vars)
yhash = tvm.ir.structural_hash(y, map_free_vars)
if struct_equal0 != struct_equal1:
raise ValueError(
"Non-communicative {} vs {}, sequal0={}, sequal1={}".format(
x, y, struct_equal0, struct_equal1
)
)
if struct_equal0 != (xhash == yhash):
raise ValueError(
"Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}".format(
x, y, struct_equal0, xhash, yhash
)
)
return struct_equal0
def test_tensor_type_sequal():
t1 = relay.TensorType((3, 4), "float32")
t2 = relay.TensorType((3, 4), "float32")
t3 = relay.TensorType((3, 4, 5), "float32")
assert t1 == t2
assert t1 != t3
t1 = relay.TensorType((), "float32")
t2 = relay.TensorType((), "float32")
assert t1 == t2
def test_incomplete_type_sequal():
t1 = relay.IncompleteType(relay.TypeKind.ShapeVar)
t2 = relay.IncompleteType(relay.TypeKind.Type)
t3 = relay.IncompleteType(relay.TypeKind.Type)
assert t2 == t2
assert t1 == t1
assert t1 != t2
assert t2 != t3
def test_type_param_sequal():
t1 = relay.TypeVar("v1", relay.TypeKind.Type)
t2 = relay.TypeVar("v2", relay.TypeKind.ShapeVar)
t3 = relay.TypeVar("v3", relay.TypeKind.Type)
assert t1 == t1
assert t2 == t2
assert t1 != t2
assert t1 != t3
ft1 = relay.FuncType(
tvm.runtime.convert([]), t1, tvm.runtime.convert([t1]), tvm.runtime.convert([])
)
ft2 = relay.FuncType(
tvm.runtime.convert([]), t3, tvm.runtime.convert([t3]), tvm.runtime.convert([])
)
ft3 = relay.FuncType(
tvm.runtime.convert([]), t2, tvm.runtime.convert([t2]), tvm.runtime.convert([])
)
assert ft1 == ft2
assert ft1 != ft3
def test_func_type_sequal():
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
tp3 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
tp4 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
tr1 = relay.TypeRelation(broadcast, tvm.runtime.convert([tp1, tp3]), 1, None)
tr2 = relay.TypeRelation(broadcast, tvm.runtime.convert([tp2, tp4]), 1, None)
tr3 = relay.TypeRelation(identity, tvm.runtime.convert([tp1, tp3]), 1, None)
ft = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr1]),
)
translate_vars = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp2,
tvm.runtime.convert([tp2, tp4]),
tvm.runtime.convert([tr2]),
)
assert ft == translate_vars
different_args = relay.FuncType(
tvm.runtime.convert([t1]), tp1, tvm.runtime.convert([tp1, tp3]), tvm.runtime.convert([tr1])
)
assert ft != different_args
different_order = relay.FuncType(
tvm.runtime.convert([t2, t1]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr1]),
)
assert ft != different_order
no_rel = relay.FuncType(
tvm.runtime.convert([t1, t2]), tp1, tvm.runtime.convert([tp1, tp3]), tvm.runtime.convert([])
)
assert ft != no_rel
more_vars = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp2,
tvm.runtime.convert([tp1, tp2, tp3]),
tvm.runtime.convert([tr1]),
)
assert ft != more_vars
all_the_vars = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp2, tp3, tp4]),
tvm.runtime.convert([tr1, tr2]),
)
assert ft != all_the_vars
different_rel = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr3]),
)
assert ft != different_rel
more_rels = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr1, tr3]),
)
assert ft != more_rels
def test_tuple_type_sequal():
t1 = relay.TensorType((1, 2, 3), "float32")
t2 = relay.TensorType((1, 2, 3, 4), "float32")
tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
tup1 = relay.TupleType(tvm.runtime.convert([t1, t2, tp1]))
tup2 = relay.TupleType(tvm.runtime.convert([t1, t2, tp1]))
tup3 = relay.TupleType(tvm.runtime.convert([t2, t1, tp1]))
tup4 = relay.TupleType(tvm.runtime.convert([t1, t2, tp2]))
assert tup1 == tup2
assert tup1 != tup3
assert tup1 != tup4
def test_type_relation_sequal():
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
t3 = relay.TensorType((1, 2, 3, 4), "float32")
broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))
tr = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr1)
same = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr1)
diff_func = relay.TypeRelation(identity, tvm.runtime.convert([t1, t2]), 1, attr1)
diff_order = relay.TypeRelation(broadcast, tvm.runtime.convert([t2, t1]), 1, attr1)
diff_args = relay.TypeRelation(broadcast, tvm.runtime.convert([t2, t3]), 1, attr1)
diff_attr = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr2)
same_attr = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr1_same)
bigger = relay.TypeRelation(identity, tvm.runtime.convert([t1, t3, t2]), 2, attr1)
diff_num_inputs = relay.TypeRelation(identity, tvm.runtime.convert([t1, t3, t2]), 1, attr2)
assert tr == same
assert tr != diff_func
assert tr != diff_order
assert tr != diff_args
assert tr != diff_attr
assert tr == same_attr
assert tr != bigger
assert bigger != diff_num_inputs
def test_type_call_sequal():
h1 = relay.GlobalTypeVar("h1")
h2 = relay.GlobalTypeVar("h2")
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
t3 = relay.TensorType((1, 2, 3, 4), "float32")
t4 = relay.TensorType((), "float32")
tc = relay.TypeCall(h1, [t1, t2, t3])
same = relay.TypeCall(h1, [t1, t2, t3])
different_func = relay.TypeCall(h2, [t1, t2, t3])
different_arg = relay.TypeCall(h1, [t1, t2, t4])
fewer_args = relay.TypeCall(h1, [t1, t2])
more_args = relay.TypeCall(h1, [t1, t2, t3, t4])
different_order_args = relay.TypeCall(h1, [t3, t2, t1])
assert tc == same
assert tc != different_func
assert tc != fewer_args
assert tc != more_args
assert tc != different_order_args
def test_constant_sequal():
x = relay.const(1)
y = relay.const(2)
assert consistent_equal(x, x)
assert not consistent_equal(x, y)
assert consistent_equal(x, relay.const(1))
def test_type_node_sequal():
v1 = relay.TypeVar("v1", 6)
v2 = relay.TypeVar("v2", 6)
assert not consistent_equal(v1, v2)
v1 = relay.TypeVar("v1", 0)
v2 = relay.TypeVar("v2", 6)
assert not consistent_equal(v1, v2)
def test_type_node_incompatible_sequal():
v1 = relay.TypeVar("v1", 6)
v2 = relay.Var("v2")
assert not consistent_equal(v1, v2)
def test_expr_node_incompatible_sequal():
v1 = relay.Var("v1")
v2 = relay.PatternVar(relay.Var("v2"))
assert not consistent_equal(v1, v2)
def test_var_sequal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
assert consistent_equal(v1, v1)
assert not consistent_equal(v1, v2)
l1 = relay.Let(v1, relay.const(1), v1)
l2 = relay.Let(v2, relay.const(1), v2)
l3 = relay.Let(v1, relay.const(1), v2)
assert consistent_equal(l1, l2)
assert not consistent_equal(l1, l3)
tt1 = relay.TensorType([], "int32")
tt2 = relay.TensorType([], "int32")
tt3 = relay.TensorType([], "int64")
v3 = relay.Var("v3", tt1)
v4 = relay.Var("v4", tt2)
v5 = relay.Var("v5", tt3)
l4 = relay.Let(v3, relay.const(1), v3)
l5 = relay.Let(v4, relay.const(1), v4)
l6 = relay.Let(v5, relay.const(1), v5)
assert consistent_equal(l4, l5)
assert not consistent_equal(l4, l6)
assert not consistent_equal(l1, l4)
def test_global_var_sequal():
v1 = relay.GlobalVar("v1")
v2 = relay.GlobalVar("v2")
assert consistent_equal(v1, v1)
assert not consistent_equal(v1, v2)
def test_tuple_sequal():
v0 = relay.Var("v0")
v1 = relay.Var("v1")
v2 = relay.Var("v2")
assert consistent_equal(relay.Tuple([]), relay.Tuple([]))
tup = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
same = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
assert consistent_equal(tup, same)
let_tup = relay.Let(v1, tup, v1)
let_mapped = relay.Let(
v2, relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])]), v2
)
assert consistent_equal(let_tup, let_mapped)
more_fields = relay.Tuple(
[v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)]), v2]
)
assert not consistent_equal(tup, more_fields)
fewer_fields = relay.Tuple([v1, relay.const(2), relay.const(3)])
assert not consistent_equal(tup, fewer_fields)
different_end = relay.Tuple([v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(5)])])
assert not consistent_equal(tup, different_end)
different_start = relay.Tuple(
[v2, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])]
)
assert not consistent_equal(tup, different_start)
longer_at_end = relay.Tuple(
[v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4), relay.const(5)])]
)
assert not consistent_equal(tup, longer_at_end)
def test_tuple_get_item_sequal():
x = relay.Var("x")
y = relay.Var("y")
assert not consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(y, 1))
assert not consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 2))
assert consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 1))
def test_function_attr():
x0 = relay.var("x0", shape=(10, 10))
w00 = relay.var("w00", shape=(10, 10))
w01 = relay.var("w01", shape=(10, 10))
w02 = relay.var("w02", shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
func0 = relay.Function([x0, w00, w01, w02], q00)
func0 = func0.with_attr("FuncName", "a")
x1 = relay.var("x1", shape=(10, 10))
w10 = relay.var("w10", shape=(10, 10))
w11 = relay.var("w11", shape=(10, 10))
w12 = relay.var("w12", shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
func1 = relay.Function([x1, w10, w11, w12], q10)
func1 = func1.with_attr("FuncName", "b")
assert not consistent_equal(func0, func1)
def test_function_sequal():
tt1 = relay.TensorType((1, 2, 3), "float32")
tt2 = relay.TensorType((4, 5, 6), "int8")
tt3 = relay.TupleType([tt1, tt2])
v1 = relay.Var("v1", tt1)
v2 = relay.Var("v2", tt2)
v3 = relay.Var("v3", tt3)
v4 = relay.Var("v4", tt2)
vret = relay.Constant(tvm.nd.array(np.ones(1)))
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
tp3 = relay.TypeVar("tp3", relay.TypeKind.ShapeVar)
tp4 = relay.TypeVar("tp4", relay.TypeKind.ShapeVar)
basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
basic_tps = [tp1, tp2]
func = relay.Function([v1, v2], v1, tt2, basic_tps)
mapped = relay.Function(basic_args, basic_args[0], tt2, basic_tps)
assert consistent_equal(func, mapped)
fewer_params = relay.Function([relay.Var("v4", tt2)], v4, tt2, basic_tps)
assert not consistent_equal(func, fewer_params)
more_params = relay.Function(
[relay.Var("v3", tt1), relay.Var("v4", tt2), relay.Var("v2", tt2)], v4, tt2, basic_tps
)
assert not consistent_equal(func, more_params)
params_unordered = relay.Function([v2, v1], v1, tt2, basic_tps)
assert not consistent_equal(func, params_unordered)
params_mismatch = relay.Function([v1, v3], v1, tt2, basic_tps)
assert not consistent_equal(func, params_mismatch)
ret_type_mismatch = relay.Function(basic_args, v4, tt1, basic_tps)
assert not consistent_equal(func, ret_type_mismatch)
different_body = relay.Function(basic_args, v3, tt2, basic_tps)
assert not consistent_equal(func, different_body)
fewer_type_params = relay.Function(basic_args, v4, tt2, [tp1])
assert not consistent_equal(func, fewer_type_params)
more_type_params = relay.Function(basic_args, v4, tt2, [tp1, tp2, tp3])
assert not consistent_equal(func, more_type_params)
type_params_unordered = relay.Function(basic_args, v4, tt2, [tp2, tp1])
assert not consistent_equal(func, type_params_unordered)
different_type_params = relay.Function(basic_args, v4, tt2, [tp3, tp4])
assert not consistent_equal(func, different_type_params)
tupled_example = relay.Function(basic_args, relay.Tuple([v3, v4]), tt3)
assert not consistent_equal(func, tupled_example)
no_ret_type = relay.Function(basic_args, v4, None, [tp1, tp2])
assert consistent_equal(no_ret_type, no_ret_type)
assert not consistent_equal(func, no_ret_type)
assert not consistent_equal(no_ret_type, func)
def test_call_sequal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))
tt1 = relay.TensorType((1, 2, 3), "float32")
tt2 = relay.TensorType((), "int8")
basic_args = [relay.const(1), relay.const(2), v2, relay.Tuple([])]
call = relay.Call(v1, [relay.const(1), relay.const(2), v2, relay.Tuple([])], attr1, [tt1])
same = relay.Call(v1, basic_args, attr1, [tt1])
assert consistent_equal(call, same)
different_fn = relay.Call(v2, basic_args, attr1, [tt1])
assert not consistent_equal(call, different_fn)
fewer_args = relay.Call(v1, [relay.const(1), relay.const(2), v2], attr1, [tt1])
assert not consistent_equal(call, fewer_args)
reordered_args = relay.Call(
v1, [relay.const(2), relay.const(1), relay.Tuple([]), v2], attr1, [tt1]
)
assert not consistent_equal(call, reordered_args)
different_args = relay.Call(v1, [relay.const(1), relay.const(2), relay.const(3)], attr1, [tt1])
assert not consistent_equal(call, different_args)
more_args = relay.Call(
v1,
[relay.const(1), relay.const(2), v2, relay.Tuple([]), relay.const(3), relay.const(4)],
attr1,
[tt1],
)
assert not consistent_equal(call, more_args)
different_attrs = relay.Call(v1, basic_args, attr2, [tt1])
assert not consistent_equal(call, different_attrs)
same_attrs = relay.Call(v1, basic_args, attr1_same, [tt1])
assert consistent_equal(call, same_attrs)
no_type_args = relay.Call(v1, basic_args, attr1)
assert not consistent_equal(call, no_type_args)
more_type_args = relay.Call(v1, basic_args, attr1, [tt1, tt2])
assert not consistent_equal(call, more_type_args)
different_type_arg = relay.Call(v1, basic_args, attr1, [tt2])
assert not consistent_equal(call, different_type_arg)
def test_let_sequal():
tt1 = relay.TensorType((), "float32")
tt2 = relay.TensorType((), "int8")
v1 = relay.Var("v1")
v1_wtype = relay.Var("v1", tt1)
v2 = relay.Var("v2")
v3 = relay.Var("v3")
let = relay.Let(v1, relay.const(2), v1)
mapped = relay.Let(v2, relay.const(2), v2)
assert consistent_equal(let, mapped)
mismatched_var = relay.Let(v2, relay.const(2), v3)
assert not consistent_equal(let, mismatched_var)
different_value = relay.Let(v2, relay.const(3), v2)
assert not consistent_equal(let, different_value)
different_body = relay.Let(v2, relay.const(3), relay.const(12))
assert not consistent_equal(let, different_body)
let_with_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
same_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
assert consistent_equal(let_with_type, same_type)
assert not consistent_equal(let, let_with_type)
v2 = relay.Var("v1", tt2)
different_type = relay.Let(v2, relay.const(2), v2)
assert not consistent_equal(let_with_type, different_type)
def test_if_sequal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
if_sample = relay.If(v1, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
same = relay.If(v1, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
assert consistent_equal(if_sample, same)
different_cond = relay.If(v2, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
assert not consistent_equal(if_sample, different_cond)
different_true = relay.If(v1, relay.const(2), relay.Tuple([relay.const(2), relay.const(3)]))
assert not consistent_equal(if_sample, different_true)
different_false = relay.If(v1, relay.const(1), relay.Tuple([]))
assert not consistent_equal(if_sample, different_false)
def test_constructor_sequal():
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
_, cons, nil = p.mod.get_type("List")
assert consistent_equal(nil, nil)
assert consistent_equal(cons, cons)
assert not consistent_equal(nil, cons)
def test_match_sequal():
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
_, cons, nil = p.mod.get_type("List")
_, none, some = p.mod.get_type("Option")
x = relay.Var("x")
y = relay.Var("y")
nil_case = relay.Clause(relay.PatternConstructor(nil), nil())
cons_case = relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(x), relay.PatternVar(y)]), cons(x, y)
)
z = relay.Var("z")
a = relay.Var("a")
equivalent_cons = relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(z), relay.PatternVar(a)]), cons(z, a)
)
data = cons(relay.const(1), cons(relay.const(2), nil()))
match = relay.Match(data, [nil_case, cons_case])
equivalent = relay.Match(data, [nil_case, equivalent_cons])
empty = relay.Match(data, [])
no_cons = relay.Match(data, [nil_case])
no_nil = relay.Match(data, [cons_case])
different_data = relay.Match(nil(), [nil_case, cons_case])
different_order = relay.Match(data, [cons_case, nil_case])
different_nil = relay.Match(
data, [relay.Clause(relay.PatternConstructor(nil), cons(nil(), nil())), cons_case]
)
different_cons = relay.Match(
data,
[
nil_case,
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
nil(),
),
],
)
another_case = relay.Match(
data, [nil_case, cons_case, relay.Clause(relay.PatternWildcard(), nil())]
)
wrong_constructors = relay.Match(
data,
[
relay.Clause(relay.PatternConstructor(none), nil()),
relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(x)]), cons(x, nil())),
],
)
tvm.ir.assert_structural_equal(match, match)
assert consistent_equal(match, match)
assert consistent_equal(match, equivalent)
assert not consistent_equal(match, no_cons)
assert not consistent_equal(match, no_nil)
assert not consistent_equal(match, empty)
assert not consistent_equal(match, different_data)
assert not consistent_equal(match, different_order)
assert not consistent_equal(match, different_nil)
assert not consistent_equal(match, different_cons)
assert not consistent_equal(match, another_case)
assert not consistent_equal(match, wrong_constructors)
def test_op_sequal():
op1 = relay.op.get("add")
op2 = relay.op.get("add")
assert consistent_equal(op1, op2)
op3 = relay.op.get("take")
assert not consistent_equal(op1, op3)
def test_graph_equal():
x = relay.var("x")
y0 = relay.add(x, x)
z0 = relay.add(y0, y0)
y1 = relay.add(x, x)
z1 = relay.add(y1, y1)
z3 = relay.add(relay.add(x, x), relay.add(x, x))
assert consistent_equal(z0, z1)
assert consistent_equal(z0, z1)
assert not consistent_equal(z0, z3)
def test_hash_unequal():
x1 = relay.var("x1", shape=(10, 10), dtype="float32")
y1 = relay.var("y1", shape=(10, 10), dtype="float32")
func1 = relay.Function([x1, y1], relay.add(x1, y1))
x2 = relay.var("x2", shape=(10, 10), dtype="float32")
y2 = relay.var("y2", shape=(10, 10), dtype="float32")
func2 = relay.Function([x2, y2], relay.add(x2, y2))
assert consistent_equal(func1, func2)
x3 = relay.var("x3", shape=(20, 10), dtype="float32")
y3 = relay.var("y3", shape=(20, 10), dtype="float32")
func3 = relay.Function([x3, y3], relay.add(x3, y3))
assert not consistent_equal(func1, func3)
def test_tuple_match():
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
y = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
assert consistent_equal(x, y)
def test_fn_attribute():
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
add = relay.add(a, b)
add_fn = relay.Function([a, b], add)
add_fn = run_opt_pass(add_fn, relay.transform.InferType())
c = relay.var("c", shape=(10, 10))
d = relay.var("d", shape=(10, 10))
add_1 = relay.add(c, d)
add_1_fn = relay.Function([c, d], add_1)
add_1_fn = add_1_fn.with_attr("TestAttribute", "test")
add_1_fn = run_opt_pass(add_1_fn, relay.transform.InferType())
assert not consistent_equal(add_1_fn, add_fn)
assert not consistent_equal(add_fn, add_1_fn)
def test_fn_vid_map():
def get_fn(with_vid):
x = relay.var("x", shape=(10,), dtype="float32")
f = relay.Function([x], x).with_attr("dict", {x.vid: 1} if with_vid else {x: 1})
return f
assert consistent_equal(get_fn(True), get_fn(True))
assert consistent_equal(get_fn(False), get_fn(False))
def test_lets():
shape = (5, 5)
def func1():
sb = relay.ScopeBuilder()
p0 = relay.var("p0", shape=shape)
p1 = relay.var("p1", shape=shape)
a0 = sb.let("a0", relay.add(p0, relay.const(1)))
a1 = sb.let("a1", relay.add(p1, relay.const(1)))
a2 = sb.let("a2", relay.add(a0, a1))
sb.ret(a2)
return relay.Function([p0, p1], sb.get())
def func2():
sb = relay.ScopeBuilder()
p0 = relay.var("p0", shape=shape)
p1 = relay.var("p1", shape=shape)
a1 = sb.let("a1", relay.add(p0, relay.const(1)))
a0 = sb.let("a0", relay.add(p1, relay.const(1)))
a2 = sb.let("a2", relay.add(a1, a0))
sb.ret(a2)
return relay.Function([p0, p1], sb.get())
def func3():
sb = relay.ScopeBuilder()
p0 = relay.var("p0", shape=shape)
p1 = relay.var("p1", shape=shape)
a1 = sb.let("a1", relay.add(p1, relay.const(1)))
a0 = sb.let("a0", relay.add(p0, relay.const(1)))
a2 = sb.let("a2", relay.ad |
d(a1, a0))
sb.ret(a2)
return relay.Function([p0, p1], sb.get())
assert tvm.ir.structural_equal(func1(), func2())
assert not tvm.ir.structural_equal(func1(), func3())
if __name__ == "__main__":
tvm.testing.main()
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay import testing
import numpy as np
from tvm.relay import Expr
from tvm.relay.analysis import free_vars
import pytest
DEBUG_PRINT = False
SEMVER = '#[version = "0.0.5"]\n'
def astext(program, unify_free_vars=False):
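"""Print the program to Relay text, re-parse it, assert structural equality with the original, and return the text."""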
text = program.astext()
if isinstance(program, Expr):
roundtrip_program = tvm.parser.parse_expr(text)
else:
roundtrip_program = tvm.parser.fromtext(text)
tvm.ir.assert_structural_equal(roundtrip_program, program, map_free_vars=True)
return text
def show(text):
if DEBUG_PRINT:
print("---------------------------")
print(text)
def assert_prints_as(expr, str):
assert astext(expr) == SEMVER + str
def test_scalars():
assert_prints_as(relay.const(42, "int16"), "42i16")
assert_prints_as(relay.const(42, "int32"), "42")
assert_prints_as(relay.const(42, "int64"), "42i64")
assert_prints_as(relay.const(3.0, "float16"), "3f16")
assert_prints_as(relay.const(3.0, "float32"), "3f")
assert_prints_as(relay.const(3.0, "float64"), "3f64")
def test_large_graph():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
for i in range(int(9e4)):
z = relay.add(z, one)
f = relay.Function([x, y], z)
show(astext(f))
def test_func():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.add(z, z)
f = relay.Function([x, y], z)
show(astext(z))
show(astext(f))
def test_mod():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
z = relay.add(x, y)
z = relay.add(z, z)
f = relay.Function([x, y], z)
mod = tvm.IRModule()
mod["myf"] = f
mod = relay.transform.InferType()(mod)
text = astext(mod)
assert "def @myf" in text
assert "def @myf" in str(mod)
assert "add(%0, %0) /* ty=float32 */" in text
assert "add(%0, %0) /* ty=float32 */" in str(mod)
show(mod.astext(annotate=lambda x: str(x.checked_type.dtype) if type(x) == relay.Call else ""))
show(text)
def test_meta_data():
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", shape=(n, c, h, w))
w = relay.var("w")
z = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
f = relay.Function([x, w], z)
text = astext(f, unify_free_vars=True)
text_no_meta = str(f)
assert "channels=2" in text
assert "channels=2" in text_no_meta
assert "meta[tir.SizeVar][0]" in text
assert "meta[tir.SizeVar][0]" in text_no_meta
assert "type_key" in text
assert "type_key" not in text_no_meta
text = astext(relay.const([1, 2, 3]))
assert "meta[relay.Constant][0]" in text
def test_call_attrs():
x = relay.var("x")
z = relay.nn.softmax(x, axis=2)
assert "axis=2" in astext(z)
z = relay.nn.softmax(x)
assert "softmax(%x)" in astext(z)
z = relay.expand_dims(x, axis=2, num_newaxis=2)
assert "num_newaxis=2" in astext(z)
def test_let_if_scope():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
cond = relay.var("cond", "bool")
sb = relay.ScopeBuilder()
with sb.if_scope(cond):
v1 = sb.let("v", relay.const(1, "float32"))
v2 = sb.let("v", x)
sb.ret(relay.subtract(v1, v2))
with sb.else_scope():
v3 = relay.var("v")
let2 = relay.Let(v3, y, v3)
sb.ret(relay.add(let2, let2))
result = sb.get()
f = relay.Function([x, y, cond], result)
text = astext(f)
assert text.count("{") == 3
assert "%cond: bool" in text
show(astext(f))
def test_variable_name():
v1 = relay.var("1")
assert "%v1" in astext(v1)
def test_mlp():
net, _ = tvm.relay.testing.mlp.get_workload(batch_size=1)
astext(net)
def test_resnet():
net, _ = tvm.relay.testing.resnet.get_workload(batch_size=1)
astext(net)
def test_mobilenet():
net, _ = tvm.relay.testing.mobilenet.get_workload(batch_size=1)
astext(net)
def test_dqn():
net, _ = tvm.relay.testing.dqn.get_workload(batch_size=1)
astext(net)
def test_dcgan():
net, _ = tvm.relay.testing.dcgan.get_workload(batch_size=1)
astext(net)
def test_lstm():
net, _ = tvm.relay.testing.lstm.get_workload(1, 1)
astext(net)
net, _ = tvm.relay.testing.lstm.get_workload(4, 4)
astext(net)
def test_inception_v3():
net, _ = tvm.relay.testing.inception_v3.get_workload(batch_size=1)
astext(net)
def test_squeezenet():
for version in ["1.0", "1.1"]:
net, _ = tvm.relay.testing.squeezenet.get_workload(batch_size=1, version=version)
astext(net)
def test_densenet():
net, _ = tvm.relay.testing.densenet.get_workload(batch_size=1)
astext(net)
def test_call_node_order():
x = relay.var("x")
y = relay.var("y")
prog = relay.Call(
relay.Function([x], x), [relay.Call(relay.Function([y], y), [relay.const(1)])]
)
assert astext(prog) == SEMVER + (
"%0 = fn (%y) {\n"
" %y\n"
"};\n"
"%1 = %0(1);\n"
"%2 = fn (%x) {\n"
" %x\n"
"};\n"
"%2(%1)"
)
def test_let_inlining():
tup = relay.Tuple([relay.const(0), relay.const(0)])
x = relay.var("x")
assert astext(relay.Let(x, tup, tup)) == SEMVER + ("%0 = (0, 0);\n" "let %x = %0;\n" "%0")
assert astext(relay.Let(x, tup, x)) == SEMVER + ("let %x = (0, 0);\n" "%x")
def test_zeros():
x = relay.op.zeros([], "float32")
astext(x)
def test_unapplied_constructor():
type_def_str = r"""
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
main_def_str = r"""
def @main[A]() -> fn (A, List[A]) -> List[A] {
Cons
}
"""
mod = tvm.parser.parse(SEMVER + type_def_str + main_def_str)
mod_str = str(mod)
assert type_def_str.strip() in mod_str
assert main_def_str.strip() in mod_str
def test_null_attribute():
x = relay.var("x")
y = relay.var("y")
z = relay.Function([x], y)
z = z.with_attr("TestAttribute", None)
txt = astext(z)
assert "TestAttribute=None" in txt
def test_span():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.Call(
z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add0"), 0, 0, 0, 0)
)
z = relay.add(z, z)
z = relay.Call(
z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add1"), 0, 0, 0, 0)
)
f = relay.Function([x, y], z)
txt = astext(f)
assert "Add0" in txt
assert "Add1" in txt
def test_optional_info():
c = relay.const(1)
call = relay.add(c, c)
m = tvm.IRModule.from_expr(call)
m = relay.transform.InferType()(m)
txt = astext(m)
assert txt.count("/* ty=int32 */") == 3
def test_slash_in_identifier():
x = relay.var("base/x")
y = relay.var("base/y")
z = x + y
txt = astext(z)
assert "base/x" in txt
assert "base/y" in txt
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import well_formed
from tvm.relay.prelude import Prelude
def test_let():
x = relay.Var("x")
assert well_formed(x)
v = relay.Constant(tvm.nd.array(10))
ty = None
let = relay.Let(x, v, x)
assert well_formed(let)
assert not well_formed(relay.Let(x, v, let))
f = relay.Function([x], x, ty)
assert well_formed(f)
assert well_formed(relay.Let(relay.Var("y"), f, relay.Let(relay.Var("z"), f, v)))
def test_tuple():
x = relay.Var("x")
assert well_formed(x)
v = relay.Constant(tvm.nd.array(10))
let = relay.Let(x, v, x)
assert well_formed(let)
assert well_formed(relay.Tuple([v, v]))
assert not well_formed(relay.Tuple([let, relay.Let(x, v, x)]))
def test_tuple_get_item():
t = relay.Var("t")
assert well_formed(relay.TupleGetItem(t, 2))
def test_adt():
mod = tvm.IRModule()
p = Prelude(mod)
_, none, some = p.mod.get_type("Option")
x = relay.Var("x")
some_case = relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(x)]), x)
default_case = relay.Clause(relay.PatternVar(x), x)
m0 = relay.Match(none(), [default_case])
m1 = relay.Match(none(), [some_case, default_case])
assert well_formed(m0)
assert not well_formed(m1)
if __name__ == "__main__":
test_let()
test_tuple()
test_tuple_get_item()
test_adt()
import tvm
from tvm import relay
from tvm import te
import json
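# The tests below hand-construct JSON node graphs tagged with older tvm_version values
# (e.g. "0.6.0") and feed them to tvm.ir.load_json, checking that legacy serialized nodes
# still deserialize into the expected IR objects.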
def test_type_var():
nodes = [
{"type_key": ""},
{"type_key": "relay.TypeVar", "attrs": {"kind": "0", "span": "0", "var": "2"}},
{"type_key": "Variable", "attrs": {"dtype": "int32", "name": "in0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.TypeVar)
assert tvar.name_hint == "in0"
nodes[1]["type_key"] = "relay.GlobalTypeVar"
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.GlobalTypeVar)
assert tvar.name_hint == "in0"
def test_var():
nodes = [
{"type_key": ""},
{
"type_key": "relay.Var",
"attrs": {
"_checked_type_": "0",
"span": "0",
"type_annotation": "0",
"vid": "2",
},
},
{"type_key": "relay.Id", "attrs": {"name_hint": "a3"}},
{"type_key": "relay.TensorType", "attrs": {"dtype": "float32", "shape": "4", "span": "0"}},
{"type_key": "Array", "data": [5, 6]},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "16", "span": "0"}},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "8", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, relay.Var)
assert tvar.name_hint == "a3"
def test_incomplete_type():
nodes = [
{"type_key": ""},
{"type_key": "relay.IncompleteType", "attrs": {"kind": "0", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.IncompleteType)
def test_func_tuple_type():
nodes = [
{"type_key": ""},
{
"type_key": "relay.FuncType",
"attrs": {
"arg_types": "2",
"ret_type": "3",
"span": "0",
"type_constraints": "6",
"type_params": "5",
},
},
{"type_key": "Array"},
{"type_key": "relay.TupleType", "attrs": {"fields": "4", "span": "0"}},
{"type_key": "Array"},
{"type_key": "Array"},
{"type_key": "Array"},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.FuncType)
def test_global_var():
nodes = [
{"type_key": ""},
{
"type_key": "relay.GlobalVar",
"attrs": {"_checked_type_": "0", "name_hint": "x", "span": "0"},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.GlobalVar)
nodes = [
{"type_key": ""},
{
"type_key": "GlobalVar",
"attrs": {"_checked_type_": "0", "name_hint": "x", "span": "0"},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.GlobalVar)
def test_op():
nodes = [{"type_key": ""}, {"type_key": "relay.Op", "global_key": "nn.conv2d"}]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
op = tvm.ir.load_json(json.dumps(data))
assert op == relay.op.get("nn.conv2d")
def test_tir_var():
nodes = [
{"type_key": ""},
{"type_key": "Variable", "attrs": {"dtype": "int32", "name": "x", "s |
pan": "0"}},
{"type_key": "SizeVar", "attrs": {"dtype": "int32", "name": "y", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
x = tvm.ir.load_json(json.dumps(data))
assert isinstance(x, tvm.tir.Var)
assert x.name == "x"
data["root"] = 2
y = tvm.ir.load_json(json.dumps(data))
assert isinstance(y, tvm.tir.SizeVar)
assert y.name == "y"
def test_str_map():
nodes = [
{"type_key": ""},
{"type_key": "StrMap", "keys": ["z", "x"], "data": [2, 3]},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "2", "span": "0"}},
{"type_key": "Max", "attrs": {"a": "4", "b": "10", "dtype": "int32", "span": "0"}},
{"type_key": "Add", "attrs": {"a": "5", "b": "9", "dtype": "int32", "span": "0"}},
{"type_key": "Add", "attrs": {"a": "6", "b": "8", "dtype": "int32", "span": "0"}},
{
"type_key": "tir.Var",
"attrs": {"dtype": "int32", "name": "7", "type_annotation": "0", "span": "0"},
},
{"type_key": "runtime.String", "repr_str": "x"},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "1", "span": "0"}},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "2", "span": "0"}},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "100", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
x = tvm.ir.load_json(json.dumps(data))
assert isinstance(x, tvm.ir.container.Map)
assert len(x) == 2
assert "x" in x
assert "z" in x
assert bool(x["z"] == 2)
def test_irmodule_attributes():
nodes = [
{"type_key": ""},
{
"type_key": "IRModule",
"attrs": {
"functions": "0",
"global_type_var_map_": "0",
"global_var_map_": "0",
"source_m |
ap": "0",
"type_definitions": "0",
},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.7.0"},
"b64ndarrays": [],
}
mod = tvm.ir.load_json(json.dumps(data))
assert isinstance(mod, tvm.ir.IRModule)
assert not mod.attrs
def test_virtual_device():
nodes = [
{"type_key": ""},
{
"type_key": "relay.Function",
"attrs": {
"_checked_type_": "0",
"attrs": "0",
"body": "0",
"params": "0",
"ret_type": "0",
"span": "0",
"type_params": "0",
},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.8.0"},
"b64ndarrays": [],
}
func = tvm.ir.load_json(json.dumps(data))
assert isinstance(func, relay.Function)
assert not func.virtual_device_
if __name__ == "__main__":
test_op()
test_type_var()
test_var()
test_incomplete_type()
test_func_tuple_type()
test_global_var()
test_tir_var()
test_str_map()
"""Unit tests for JSON codegen and runtime."""
import os
import sys
import numpy as np
import tvm
import tvm.relay.op as reg
import tvm.relay.testing
from tvm import relay, runtime
from tvm.contrib import utils
from tvm.relay import transform
from tvm.relay.backend import te_compiler
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import get_pattern_table
def set_func_attr(func, compile_name, symbol_name):
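"""Mark a partitioned function as a primitive, inlinable external function compiled by compile_name, exported under symbol_name."""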
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compile_name)
func = func.with_attr("global_symbol", symbol_name)
return func
def check_result(
mod, ref_mod, map_inputs, out_shape, tol=1e-5, target="llvm", device=tvm.cpu(), params=None
):
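"""Build ref_mod with the regular TVM pipeline to get a reference output, then run mod through both the VM and the graph executor and compare the results against it."""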
if sys.platform == "win32":
print("Skip test on Windows for now")
return
te_compiler.get().clear()
with tvm.transform.PassContext(opt_level=3):
json, lib, param = relay.build(ref_mod, target=target, params=params)
rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, device=device)
out = rt_mod.get_output(0, out)
ref_result = out.numpy()
def check_vm_result():
te_compiler.get().clear()
with relay.build_config(opt_level=3):
exe = relay.vm.compile(mod, target=target, params=params)
code, lib = exe.save()
exe = runtime.vm.Executable.load_exec(code, lib)
vm = runtime.vm.VirtualMachine(exe, device)
out = vm.run(**map_inputs)
tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol)
def check_graph_executor_result():
te_compiler.get().clear()
with relay.build_config(opt_level=3):
json, lib, param = relay.build(mod, target=target, params=params)
rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, device=device)
out = rt_mod.get_output(0, out)
tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol)
check_vm_result()
check_graph_executor_result()
def test_conv2d():
"""Test a subgraph with a single conv2d operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
def conv2d_direct():
dtype = "float32"
ishape = (1, 1, 99, 12)
w1shape = (54, 1, 3, 3)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
out = relay.nn.conv2d(
data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1)
)
func = relay.Function([data0, weight0], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=(ishape), dtype=dtype)
weight = relay.var("weight", shape=(w1shape), dtype=dtype)
main_f = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
out = relay.nn.conv2d(
data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1)
)
main_f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 54, 50, 6)
def group_conv2d():
dtype = "float32"
ishape = (1, 32, 14, 14)
w2shape = (32, 1, 3, 3)
data0 = relay.var("data", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight", shape=( |
w2shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
func = relay.Function([data0, weight0], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=(ishape), dtype=dtype)
weight = relay.var("weight", shape=(w2shape), dtype=dtype)
main_f = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
main_f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w2shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w_data}, (1, 32, 14, 14)
for mod, ref_mod, map_inputs, out_shape in [conv2d_direct(), group_conv2d()]:
check_result(mod, ref_mod, map_inputs, out_shape, tol=1e-5)
def test_add():
"""Test a subgraph with a single add operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (10, 10)
def gen_add():
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.add(data0, data1)
func = relay.Function([data0, data1], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
main_f = relay.Function([data0, data1], glb_var(data0, data1))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.add(data0, data1)
main_f = relay.Function([data0, data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_add()
data0 = np.random.uniform(0, 1, shape).astype(dtype)
data1 = np.random.uniform(0, 1, shape).astype(dtype)
check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_multiply():
"""Test a subgraph with a single add operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (10, 10)
def gen_multiply():
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.multiply(data0, data1)
func = relay.Function([data0, data1], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
main_f = relay.Function([data0, data1], glb_var(data0, data1))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape |
, dtype=dtype)
out = relay.multiply(data0, data1)
main_f = relay.Function([data0, data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_multiply()
data0 = np.random.uniform(0, 1, shape).astype(dtype)
data1 = np.random.uniform(0, 1, shape).astype(dtype)
check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_relu():
"""Test a subgraph with a single ReLU operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (1, 32, 14, 14)
def gen_relu(shape):
data0 = relay.var("data0", shape=shape, dtype=dtype)
out = relay.nn.relu(data0)
func = relay.Function([data0], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
main_f = relay.Function([data0], glb_var(data0))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
out = relay.nn.relu(data0)
main_f = relay.Function([data0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
def check(shape):
mod, ref_mod = gen_relu(shape)
data0 = np.random.uniform(-1, 1, shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data0": data0,
},
shape,
tol=1e-5,
)
check(shape=(1, 32, 14, 14))
check(shape=(1, 32))
def test_dense():
"""Test a subgraph with a single dense ope |
rator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
a_shape = (1, 512)
b_shape = (1024, 512)
def gen_dense():
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
out = relay.nn.dense(a, b)
func = relay.Function([a, b], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
main_f = relay.Function([a, b], glb_var(a, b))
mod["main"] = main_f
mod = transform.InferType()(mod)
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
out = relay.nn.dense(a, b)
main_f = relay.Function([a, b], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_dense()
data_a = np.random.uniform(0, 1, a_shape).astype(dtype)
data_b = np.random.uniform(0, 1, b_shape).astype(dtype)
check_result(mod, ref_mod, {"A": data_a, "B": data_b}, (1, 1024), tol=1e-5)
def test_bn():
"""Test a subgraph with a single batch_norm operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
d_shape = (1, 8)
c_shape = (8,)
def gen_bn():
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape) |
bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
out = bn[0]
func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
main_f = relay.Function(
[data, gamma, beta, moving_mean, moving_var],
glb_var(data, gamma, beta, moving_mean, moving_var),
)
mod["main"] = main_f
mod = transform.InferType()(mod)
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
out = bn[0]
main_f = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_bn()
data = np.random.uniform(-1, 1, d_shape).astype(dtype)
gamma = np.random.uniform(-1, 1, c_shape).astype(dtype)
beta = np.random.uniform(-1, 1, c_shape).astype(dtype)
moving_mean = np.random.uniform(-1, 1, c_shape).astype(dtype)
moving_var = np.random.uniform(-1, 1, c_shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data": data,
"gamma": gamma,
"beta": beta,
"moving_mean": moving_mean,
"moving_var": moving_va |
r,
},
d_shape,
tol=1e-5,
)
def test_multiple_ops():
"""Test a subgraph with multiple operators."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
w2shape = (64, 32, 5, 5)
def get_net():
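# conv2d -> relu -> conv2d -> relu chain, used both as the reference and as the input to partitioning.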
data = relay.var("data", relay.TensorType(ishape, dtype))
w1 = relay.var("w1", relay.TensorType(w1shape, dtype))
w2 = relay.var("w2", relay.TensorType(w2shape, dtype))
layer = relay.nn.conv2d(data=data, weight=w1, kernel_size=(3, 3), padding=(1, 1))
layer = relay.nn.relu(layer)
layer = relay.nn.conv2d(data=layer, weight=w2, kernel_size=(5, 5), padding=(2, 2))
layer = relay.nn.relu(layer)
main_f = relay.Function([data, w1, w2], layer)
mod = tvm.IRModule()
mod["main"] = main_f
return mod
def get_partitioned_mod(mod):
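# Canonicalize the graph, then annotate DNNL-supported ops, merge compiler regions, and partition them out.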
remove_bn_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
byoc_pass = tvm.transform.Sequential(
[
remove_bn_pass,
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
return byoc_pass(mod)
ref_mod = get_net()
mod = get_partitioned_mod(ref_mod)
data = np.random.uniform(0, 1, ishape).astype(dtype)
w1 = np.random.uniform(0, 1, w1shape).astype(dtype)
w2 = np.random.uniform(0, 1, w2shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data": data,
"w1": w1,
"w2": w2,
},
(1, 64, 14, 14),
tol=1e-5,
)
def test_composite():
"""Test DNNL patterns and there composite functions."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
def conv2d_relu():
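# Manually construct the expected partitioning: a composite conv2d+relu function wrapped by a DNNL-offloaded function.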
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
func = relay.Function([in_1, in_2], relu)
func = func.with_attr("Composite", "dnnl.conv2d_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
call = relay.Call(func, [arg_1, arg_2])
p_func = relay.Function([arg_1, arg_2], call)
p_func = set_func_attr(p_func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = p_func
mod = transform.InferType()(mod)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
main_func = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_func
mod = transform.InferType()(mod)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
main_func = relay.Function([data, weight], relu)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_func
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 32, 14, 14)
def conv2d_bias_relu():
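# Same structure as conv2d_relu above, with a bias add fused between conv2d and relu.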
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
bshape = (32, 1, 1)
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
in_3 = relay.var("in_3", shape=bshape, dtype=dtype)
conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d, in_3)
relu = relay.nn.relu(add)
func = relay.Function([in_1, in_2, in_3], relu)
func = func.with_attr("Composite", "dnnl.conv2d_bias_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_add_nn.relu_")
arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
arg_3 = relay.var("arg_3", shape=bshape, dtype=dtype)
call = relay.Call(func, [arg_1, arg_2, arg_3])
p_func = relay.Function([arg_1, arg_2, arg_3], call)
p_func = set_func_attr(p_func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = p_func
mod = transform.InferType()(mod)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
bias = relay.var("bias", shape=bshape, dtype=dtype)
main_func = relay.Function([data, weight, bias], glb_var(data, weight, bias))
mod["main"] = main_func
mod = transform.InferType()(mod)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
bias = relay.var("bias", shape=bshape, dtype=dtype)
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d, bias)
relu = relay.nn.relu(add)
main_func = relay.Function([data, weight, bias], relu)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_func
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
b_data = np.random.uniform(0, 1, bshape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data, "bias": b_data}, (1, 32, 14, 14)
for mod, ref_mod, input_maps, out_shape in [conv2d_relu(), conv2d_bias_relu()]:
check_result(mod, ref_mod, input_maps, out_shape, tol=1e-5)
def test_constant():
"""Test the subgraph with (var, const, ...) arguments."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, 32, 3, 3)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=wshape, dtype=dtype)
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3), padding=(1, 1))
bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)
out = bn_output[0]
out = relay.nn.relu(out)
func = relay.Function(relay.analysis.free_vars(out), out)
ref_mod, params = tvm.relay.testing.create_workload(func)
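# Bind the generated weights as constants so the offloaded subgraph receives (var, const, ...) arguments.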
ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
remove_bn_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
dnnl_patterns = get_pattern_table("dnnl")
composite_partition = tvm.transform.Sequential(
[
transform.MergeComposite(dnnl_patterns), |
transform.AnnotateTarget("dnnl"),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
ref_mod = remove_bn_pass(ref_mod)
mod = composite_partition(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
check_result(mod, ref_mod, {"data": i_data}, (1, 32, 14, 14), tol=1e-5)
def test_partial_constant():
"""Test the subgraph with (const, var, const, var) arguments."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (10, 10)
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=ishape, dtype=dtype)
in_3 = relay.var("in_3", shape=ishape, dtype=dtype)
in_4 = relay.var("in_4", shape=ishape, dtype=dtype)
add1 = relay.add(in_1, in_2)
add2 = relay.add(add1, in_3)
add3 = relay.add(add2, in_3)
add4 = relay.add(add3, in_4)
func = relay.Function([in_1, in_2, in_3, in_4], add4)
ref_mod = tvm.IRModule.from_expr(func)
ref_mod = relay.transform.InferType()(ref_mod)
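# Bind in_1 and in_3 as constants; in_2 and in_4 stay as runtime inputs.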
data1 = np.random.uniform(0, 1, ishape).astype(dtype)
data3 = np.random.uniform(0, 1, ishape).astype(dtype)
params = {
"in_1": tvm.nd.array(data1, device=tvm.cpu(0)),
"in_3": tvm.nd.array(data3, device=tvm.cpu(0)),
}
ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
opt_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
mod = opt_pass(ref_mod)
data2 = np.random.uniform(0, 1, ishape).astype(dtype) |
data4 = np.random.uniform(0, 1, ishape).astype(dtype)
check_result(mod, ref_mod, {"in_2": data2, "in_4": data4}, (10, 10), tol=1e-5)
if __name__ == "__main__":
test_conv2d()
test_add()
test_multiply()
test_relu()
test_dense()
test_bn()
test_multiple_ops()
test_composite()
test_constant()
test_partial_constant() |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.relay.testing import resnet
from tvm.relay.analysis import count_layers
def test_layer_count():
def verify(num_layers):
# Load a resnet with a known number of layers.
mod, _ = resnet.get_workload(num_layers=num_layers)
# Count the number of conv and dense layers.
count = count_layers(mod, valid_ops=["nn.conv2d", "nn.dense"])
assert count == num_layers
verify(18)
verify(50)
if __name__ == "__main__":
test_layer_count()
|
import tvm
from tvm import te
import numpy as np
from tvm import relay
def check_memory_plan(func, check_fn):
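# Evaluate the function under two pass configurations and compare both results against a NumPy reference.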
mod = tvm.IRModule().from_expr(func)
args = []
for param in func.params:
param = param.type_annotation
sh = [int(sh) for sh in param.shape]
data = np.random.rand(*sh).astype(param.dtype)
args.append(tvm.nd.array(data))
ex = relay.create_executor("vm", mod)
no_plan_result = ex.evaluate()(*args)
with tvm.transform.PassContext(opt_level=1, disabled_pass=["MemoryPlan"]):
plan_result = ex.evaluate()(*args)
py_res = check_fn(*[arg.numpy() for arg in args])
np.testing.assert_allclose(no_plan_result.numpy(), plan_result.numpy())
np.testing.assert_allclose(plan_result.numpy(), py_res)
def storage_type(mod):
return relay.TypeCall(mod.get_global_type_var("Storage"), [])
def test_tyck_alloc_storage():
mod = tvm.IRModule()
mod.import_from_std("core.rly")
def test_tyck_alloc_tensor():
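# Type-check a manual alloc_tensor call whose storage argument uses the Storage type imported from core.rly.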
mod = tvm.IRModule()
mod.import_from_std("core.rly")
sto = relay.Var("x", storage_type(mod))
sh = relay.const(np.array([1, 2]), dtype="int64")
at = relay.op.memory.alloc_tensor(sto, relay.const(0, dtype="int64"), sh)
mod["main"] = relay.Function([sto], at)
relay.transform.InferType()(mod)
def check_add(x):
return x + x
def test_add():
x = relay.var("x", shape=(2,))
z = x + x
func = relay.Function(
[
x,
],
z,
)
check_memory_plan(func, check_add)
def check_add_sub(x, y):
z = x + x
return z - y
def test_add_sub():
x = relay.var("x", shape=(10,))
y = relay.var("y", shape=(10,))
z = x + x
z = z - y
func = relay.Function([x, y], z)
check_memory_plan(func, check_add_sub)
def check_no_fuse(x, y, w):
z = x + y
return np.matmul(z, np.transpose(w))
def test_no_fuse():
x = relay.var("x", shape=(5, 1))
y = relay.var("y", shape=(5, 1))
w = relay.var("w", shape=(5, 1))
z = x + y
out = relay.op.nn.dense(z, w) |
func = relay.Function([x, y, w], out)
check_memory_plan(func, check_no_fuse)
if __name__ == "__main__":
test_tyck_alloc_tensor()
test_add()
test_add_sub() |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
import tvm.relay as relay
import tvm.relay.backend.utils as utils
import pytest
def test_mangle_mod_name():
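# Module names are mangled with the "tvmgen" prefix; unsupported characters raise ValueError.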
assert utils.mangle_module_name("default") == "tvmgen_default"
assert utils.mangle_module_name("ccompiler") == "tvmgen_ccompiler"
assert utils.mangle_module_name("1234"), "tvmgen_1234"
assert utils.mangle_module_name(""), "tvmgen"
assert utils.mangle_module_name(None), "tvmgen"
with pytest.raises(ValueError):
utils.mangle_module_name("\u018e")
utils.mangle_module_name("\xf1")
if __name__ == "__main__":
pytest.main([__file__])
|
import tvm |
import tvm.testing
from tvm import relay
from tvm.ir import GlobalVar, structural_equal
from tvm.ir.supply import NameSupply
from tvm.ir.supply import GlobalVarSupply
def test_name_supply():
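# Reserved names are tracked; fresh_name applies the "prefix" prefix and appends a numeric suffix to deduplicate.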
name_supply = NameSupply("prefix")
name_supply.reserve_name("test")
assert name_supply.contains_name("test")
assert name_supply.fresh_name("test") == "prefix_test_1"
assert name_supply.contains_name("test_1")
assert not name_supply.contains_name("test_1", False)
assert not name_supply.contains_name("test_2")
def test_global_var_supply_from_none():
var_supply = GlobalVarSupply()
global_var = GlobalVar("test")
var_supply.reserve_global(global_var)
assert structural_equal(var_supply.unique_global_for("test"), global_var)
assert not structural_equal(var_supply.fresh_global("test"), global_var)
def test_global_var_supply_from_name_supply():
name_supply = NameSupply("prefix")
var_supply = GlobalVarSupply(name_supply)
global_var = GlobalVar("test")
var_supply.reserve_global(global_var)
assert structural_equal(var_supply.unique_global_for("test", False), global_var)
assert not structural_equal(var_supply.unique_global_for("test"), global_var)
def test_global_var_supply_from_ir_mod():
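# GlobalVars already defined in the module are reserved, so fresh and unique requests must not collide with them.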
x = relay.var("x")
y = relay.var("y")
mod = tvm.IRModule()
global_var = GlobalVar("test")
mod[global_var] = relay.Function([x, y], relay.add(x, y))
var_supply = GlobalVarSupply(mod)
second_global_var = var_supply.fresh_global("test", False)
assert structural_equal(var_supply.unique_global_for("test", False), global_var)
assert not structural_equal(var_supply.unique_global_for("test"), global_var)
assert not structural_equal(second_global_var, global_var)
if __name__ == "__main__":
tvm.testing.main() |
import pytest
from tvm import TVMError
from tvm.relay.backend.name_transforms import (
to_c_function_style,
to_c_variable_style,
to_c_constant_style,
prefix_name,
prefix_generated_name,
)
from tvm.runtime.name_transforms import sanitize_name
def test_to_c_function_style():
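# TVM-prefixed names are converted to CamelCase function names; names without the TVM prefix raise TVMError.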
assert to_c_function_style("TVM_Woof") == "TVMWoof"
assert to_c_function_style("TVM_woof") == "TVMWoof"
assert to_c_function_style("TVM_woof_woof") == "TVMWoofWoof"
assert to_c_function_style("TVMGen_woof_woof") == "TVMGenWoofWoof"
with pytest.raises(TVMError, match="Function not TVM prefixed"):
to_c_function_style("Cake_Bakery")
with pytest.raises(TVMError, match="Function name is empty"):
to_c_function_style("")
def test_to_c_variable_style():
assert to_c_variable_style("TVM_Woof") == "tvm_woof"
assert to_c_variable_style("TVM_woof") == "tvm_woof"
assert to_c_variable_style("TVM_woof_Woof") == "tvm_woof_woof"
with pytest.raises(TVMError, match="Variable not TVM prefixed"):
to_c_variable_style("Cake_Bakery")
with pytest.raises(TVMError, match="Variable name is empty"):
to_c_variable_style("")
def test_to_c_constant_style():
assert to_c_constant_style("TVM_Woof") == "TVM_WOOF"
assert to_c_constant_style("TVM_woof") == "TVM_WOOF"
assert to_c_constant_style("TVM_woof_Woof") == "TVM_WOOF_WOOF"
with pytest.raises(TVMError, match="Constant not TVM prefixed"):
to_c_constant_style("Cake_Bakery")
with pytest.raises(TVMError):
to_c_constant_style("")
def test_prefix_name():
assert prefix_name("Woof") == "TVM_Woof"
assert prefix_name(["Woof"]) == "TVM_Woof"
assert prefix_name(["woof"]) == "TVM_woof"
assert prefix_name(["woof", "moo"]) == "TVM_woof_moo"
with pytest.raises(TVMError, match="Name is empty"):
prefix_name("")
with pytest.raises(TVMError, match="Name segments empty"):
prefix_name([])
with pytest.raises(TVMError, match="Name segment is empty"):
prefix_name([""])
def test_prefix_generated_name():
assert prefix_generated_name("Woof") == "TVMGen_Woof"
assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
assert prefix_generated_name(["woof"]) == "TVMGen_woof"
assert prefix_generated_name(["woof", "moo"]) == "TVMGen_woof_moo"
with pytest.raises(TVMError, match="Name is empty"):
prefix_generated_name("")
with pytest.raises(TVMError, match="Name segments empty"):
prefix_generated_name([])
with pytest.raises(TVMError, match="Name segment is empty"):
prefix_generated_name([""])
def test_sanitize_name():
assert sanitize_name("+_+ ") == "____"
assert sanitize_name("input+") == "input_"
assert sanitize_name("input-") == "input_"
assert sanitize_name("input++") == "input__"
assert sanitize_name("woof:1") == "woof_1"
with pytest.raises(TVMError, match="Name is empty"):
sanitize_name("")
def test_combined_logic():
assert (
to_c_function_style(prefix_name(["Device", "target", "Invoke"])) == "TVMDeviceTargetInvoke"
)
assert to_c_function_style(prefix_generated_name(["model", "Run"])) == "TVMGenModelRun"
assert to_c_variable_style(prefix_name(["Device", "target", "t"])) == "tvm_device_target_t"
assert (
to_c_variable_style(prefix_generated_name(["model", "Devices"])) == "tvmgen_model_devices"
) |
import numpy as np |
import scipy
from scipy import special
import tvm |
import tvm.testing |
import tvm.relay as relay
from tvm import topi
from tvm import te
from tvm.contrib import graph_executor
from tvm.topi |