import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import create_executor
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay.prelude import Prelude
from tvm.relay.testing import count as count_, make_nat_value, make_nat_expr
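# Set up a Relay prelude over an empty IRModule and pull in the nat dialect,
# which defines the Peano-style `nat` ADT (zero `z`, successor `s`) used below.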
prelude = p = Prelude(tvm.IRModule({}))
p.mod.import_from_std("nat.rly")
def count(e):
return count_(p, e)
dev = tvm.device("llvm", 0)
def eval(expr):
return create_executor(mod=prelude.mod, device=dev, target="llvm").evaluate(expr)
nat, z, s = prelude.mod.get_type("nat")
double = p.mod.get_global_var("nat_double")
add = p.mod.get_global_var("nat_add")
optional, some, none = prelude.mod.get_type("Option")
rlist, cons, nil = prelude.mod.get_type("List")
hd = p.hd
tl = p.tl
nth = p.nth
update = p.update
length = p.length
map = p.map
foldl = p.foldl
foldr = p.foldr
foldr1 = p.foldr1
sum = p.sum
concat = p.concat
filter = p.filter
zip = p.zip
rev = p.rev
unfoldl = p.unfoldl
unfoldr = p.unfoldr
map_accumr = p.map_accumr
map_accuml = p.map_accuml
tree, rose = prelude.mod.get_type("Tree")
tmap = p.tmap
size = p.size
compose = p.compose
iterate = p.iterate
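# Helpers that convert interpreter results (ConstructorValue / ADT objects)
# back into plain Python lists and dicts so the tests can assert on them.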
def to_list(l):
assert isinstance(l, ConstructorValue)
val = l
ret = []
while True:
if val.tag == cons.tag:
ret.append(val.fields[0])
val = val.fields[1]
else:
assert val.tag == nil.tag
break
return ret
def tree_to_dict(t):
assert isinstance(t, ConstructorValue)
ret = {}
assert t.tag == rose.tag
ret["member"] = t.fields[0]
ret["children"] = []
for subtree in to_list(t.fields[1]):
l = tree_to_dict(subtree)
ret["children"].append(l)
return ret
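# vmobj_to_list flattens a VM or interpreter result (NDArray, runtime ADT, or
# ConstructorValue) into a flat Python list, recursing through Cons/Nil cells.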
def vmobj_to_list(o, dtype="float32"):
if isinstance(o, tvm.nd.NDArray):
return [o.numpy().tolist()]
elif isinstance(o, tvm.runtime.container.ADT):
if len(o) == 0:
tensor_nil = p.get_var("tensor_nil", dtype=dtype)
if tensor_nil.tag == o.tag:
return [0]
return []
result = []
for f in o:
result.extend(vmobj_to_list(f, dtype))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1], dtype)
hd = vmobj_to_list(o.fields[0], dtype)
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def get_scalar(tv):
return tv.numpy().item()
def test_nat_value():
assert count(make_nat_value(p, 10)) == 10
assert count(eval(s(s(z())))) == 2
@tvm.testing.uses_gpu
def test_nat_constructor():
func = relay.Function([], z())
test_z = relay.GlobalVar("test_z")
test_sz = relay.GlobalVar("test_sz")
prelude.mod[test_z] = func
func = relay.Function([], s(z()))
prelude.mod[test_sz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_z].body.checked_type == nat()
assert ck_mod[test_sz].body.checked_type == nat()
@tvm.testing.uses_gpu
def test_double():
assert prelude.mod[double].checked_type == relay.FuncType([nat()], nat())
res = eval(double(s(z())))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_add():
assert prelude.mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
res = eval(add(s(z()), s(z())))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_list_constructor():
test_consz = relay.GlobalVar("test_consz")
func = relay.Function([], cons(z(), nil()))
prelude.mod[test_consz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_consz].body.checked_type == rlist(nat())
@tvm.testing.uses_gpu
def test_hd_tl():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(make_nat_expr(prelude, i), l)
got = []
for i in range(len(expected)):
got.append(count(eval(hd(l))))
l = tl(l)
assert got == expected
@tvm.testing.uses_gpu
def test_nth():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(relay.const(i), l)
for i in range(len(expected)):
nth = prelude.mod.get_global_var("nth")
item = eval(nth(l, relay.const(i)))
assert get_scalar(item) == i
@tvm.testing.uses_gpu
def test_update():
expected = list(range(10))
l = nil()
for i in range(len(expected)):
l = cons(make_nat_expr(prelude, 0), l)
for i, v in enumerate(expected):
l = update(l, relay.const(i), make_nat_expr(prelude, v))
got = []
for i in range(len(expected)):
got.append(count(eval(nth(l, relay.const(i)))))
assert got == expected
@tvm.testing.uses_gpu
def test_length():
a = relay.TypeVar("a")
assert prelude.mod[length].checked_type == relay.FuncType(
[rlist(a)], relay.scalar_type("int32"), [a]
)
res = eval(length(cons(z(), cons(z(), cons(z(), nil())))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_map():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[map].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), rlist(a)], rlist(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(map(add_one, cons(z(), cons(z(), nil()))))
ones = to_list(res)
assert len(ones) == 2
assert count(ones[0]) == 1 and count(ones[1]) == 1
@tvm.testing.uses_gpu
def test_foldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldl].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], a), a, rlist(b)], a, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
rev_dup = relay.Function([y, x], cons(x, cons(x, y)))
res = eval(
foldl(
rev_dup,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
reversed = to_list(res)
assert len(reversed) == 6
assert count(reversed[0]) == 3 and count(reversed[1]) == 3
assert count(reversed[2]) == 2 and count(reversed[3]) == 2
assert count(reversed[4]) == 1 and count(reversed[5]) == 1
@tvm.testing.uses_gpu
def test_foldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldr].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], b), b, rlist(a)], b, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
identity = relay.Function([x, y], cons(x, y))
res = eval(
foldr(
identity,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
same = to_list(res)
assert len(same) == 3
assert count(same[0]) == 1 and count(same[1]) == 2 and count(same[2]) == 3
@tvm.testing.uses_gpu
def test_foldr1():
a = relay.TypeVar("a")
lhs = prelude.mod[foldr1].checked_type
rhs = relay.FuncType([relay.FuncType([a, a], a), rlist(a)], a, [a])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
f = relay.Function([x, y], add(x, y))
res = eval(
foldr1(
f,
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
assert count(res) == 6
@tvm.testing.uses_gpu
def test_sum():
assert prelude.mod[sum].checked_type == relay.FuncType(
[rlist(relay.scalar_type("int32"))], relay.scalar_type("int32")
)
res = eval(sum(cons(relay.const(1), cons(relay.const(2), nil()))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_concat():
a = relay.TypeVar("a")
assert prelude.mod[concat].checked_type == relay.FuncType([rlist(a), rlist(a)], rlist(a), [a])
l1 = cons(make_nat_expr(prelude, 1), cons(make_nat_expr(prelude, 2), nil()))
l2 = cons(make_nat_expr(prelude, 3), cons(make_nat_expr(prelude, 4), nil()))
res = eval(concat(l1, l2))
catted = to_list(res)
assert len(catted) == 4
assert count(catted[0]) == 1
assert count(catted[1]) == 2
assert count(catted[2]) == 3
assert count(catted[3]) == 4
@tvm.testing.uses_gpu
def test_filter():
a = relay.TypeVar("a")
expected_type = relay.FuncType(
[relay.FuncType([a], relay.scalar_type("bool")), rlist(a)], rlist(a), [a]
)
assert prelude.mod[filter].checked_type == expected_type
x = relay.Var("x", nat())
greater_than_one = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
s, [relay.PatternConstructor(s, [relay.PatternWildcard()])]
),
relay.const(True),
),
relay.Clause(relay.PatternWildcard(), relay.const(False)),
],
),
)
res = eval(
filter(
greater_than_one,
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 3),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 1), nil())),
),
),
),
),
)
)
filtered = to_list(res)
assert len(filtered) == 2
assert count(filtered[0]) == 3
assert count(filtered[1]) == 5
@tvm.testing.uses_gpu
def test_zip():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType([rlist(a), rlist(b)], rlist(relay.TupleType([a, b])), [a, b])
assert prelude.mod[zip].checked_type == expected_type
l1 = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
l2 = cons(nil(), cons(cons(nil(), nil()), cons(cons(nil(), cons(nil(), nil())), nil())))
res = eval(zip(l1, l2))
zipped = to_list(res)
assert len(zipped) == 3
assert count(zipped[0][0]) == 1
assert len(to_list(zipped[0][1])) == 0
assert count(zipped[1][0]) == 2
assert len(to_list(zipped[1][1])) == 1
assert count(zipped[2][0]) == 3
assert len(to_list(zipped[2][1])) == 2
l3 = cons(make_nat_expr(prelude, 4), cons(make_nat_expr(prelude, 5), nil()))
shorter_res = eval(zip(l3, l2))
truncated = to_list(shorter_res)
assert len(truncated) == 2
assert count(truncated[0][0]) == 4
assert len(to_list(truncated[0][1])) == 0
assert count(truncated[1][0]) == 5
assert len(to_list(truncated[1][1])) == 1
l4 = cons(nil(), nil())
shortest_res = eval(zip(l3, l4))
singleton = to_list(shortest_res)
assert len(singleton) == 1
assert count(singleton[0][0]) == 4
assert len(to_list(singleton[0][1])) == 0
@tvm.testing.uses_gpu
def test_rev():
a = relay.TypeVar("a")
assert prelude.mod[rev].checked_type == relay.FuncType([rlist(a)], rlist(a), [a])
res = eval(
rev(
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
)
)
reversed = to_list(res)
assert len(reversed) == 3
assert count(reversed[0]) == 3
assert count(reversed[1]) == 2
assert count(reversed[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType(
[relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
)
x = relay.Var("x", nat())
n = relay.Var("n", nat())
count_down = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
),
relay.Clause(relay.PatternConstructor(z, []), none()),
],
),
)
res = eval(unfoldr(count_down, make_nat_expr(prelude, 3)))
unfolded = to_list(res)
assert len(unfolded) == 3
assert count(unfolded[0]) == 3
assert count(unfolded[1]) == 2
assert count(unfolded[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType(
[relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
)
x = relay.Var("x", nat())
n = relay.Var("n", nat())
count_down = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
),
relay.Clause(relay.PatternConstructor(z, []), none()),
],
),
)
res = eval(unfoldl(count_down, make_nat_expr(prelude, 3)))
unfolded = to_list(res)
assert len(unfolded) == 3
assert count(unfolded[0]) == 1
assert count(unfolded[1]) == 2
assert count(unfolded[2]) == 3
@tvm.testing.uses_gpu
def test_map_accumr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accumr].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_acc_to_each = relay.Function([acc, x], relay.Tuple([add(x, acc), add(x, acc)]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accumr(add_acc_to_each, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 6
assert count(new_vals[1]) == 5
assert count(new_vals[2]) == 3
@tvm.testing.uses_gpu
def test_map_accuml():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accuml].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_to_acc = relay.Function([acc, x], relay.Tuple([add(x, acc), x]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accuml(add_to_acc, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 3
assert count(new_vals[1]) == 2
assert count(new_vals[2]) == 1
@tvm.testing.uses_gpu
def test_optional_matching():
x = relay.Var("x")
y = relay.Var("y")
v = relay.Var("v")
condense = relay.Function(
[x, y],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(v)]), cons(v, y)),
relay.Clause(relay.PatternConstructor(none), y),
],
),
)
res = eval(
foldr(
condense,
nil(),
cons(
some(make_nat_expr(prelude, 3)),
cons(none(), cons(some(make_nat_expr(prelude, 1)), nil())),
),
)
)
reduced = to_list(res)
assert len(reduced) == 2
assert count(reduced[0]) == 3
assert count(reduced[1]) == 1
@tvm.testing.uses_gpu
def test_tmap():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[tmap].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), tree(a)], tree(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(tmap(add_one, rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))))
tree_dict = tree_to_dict(res)
assert count(tree_dict["member"]) == 1
assert len(tree_dict["children"]) == 2
for subtree in tree_dict["children"]:
assert count(subtree["member"]) == 1
assert len(subtree["children"]) == 0
@tvm.testing.uses_gpu
def test_size():
a = relay.TypeVar("a")
lhs = prelude.mod[size].checked_type
rhs = relay.FuncType([tree(a)], relay.scalar_type("int32"), [a])
assert lhs == rhs
root = rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))
t = rose(z(), cons(root, cons(root, cons(root, nil()))))
res = eval(size(t))
assert get_scalar(res) == 10
@tvm.testing.uses_gpu
def test_wildcard_match_solo():
x = relay.Var("x", nat())
copy = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternWildcard(), x)]), nat())
res = eval(copy(s(s(s(z())))))
assert count(res) == 3
@tvm.testing.uses_gpu
def test_wildcard_match_order():
x = relay.Var("x", rlist(nat()))
y = relay.Var("y")
a = relay.Var("a")
return_zero = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternWildcard(), z()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(a)]), y
),
relay.Clause(relay.PatternConstructor(nil), s(z())),
],
),
nat(),
)
res = eval(return_zero(cons(s(z()), nil())))
assert count(res) == 0
@tvm.testing.uses_gpu
def test_nested_matches():
a = relay.TypeVar("a")
x = relay.Var("x", type_annotation=rlist(rlist(a)))
y = relay.Var("y")
w = relay.Var("w")
h = relay.Var("h")
t = relay.Var("t")
flatten = relay.GlobalVar("flatten")
inner_match = relay.Match(
y,
[
relay.Clause(relay.PatternConstructor(nil), flatten(w)),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
cons(h, flatten(cons(t, w))),
),
],
)
prelude.mod[flatten] = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(nil), nil()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(w)]),
inner_match,
),
],
),
rlist(a),
[a],
)
first_list = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
second_list = cons(
make_nat_expr(prelude, 4),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 6), nil())),
)
final_list = cons(first_list, cons(second_list, nil()))
res = eval(flatten(final_list))
flat = to_list(res)
assert len(flat) == 6
for i in range(6):
assert count(flat[i]) == i + 1
@tvm.testing.uses_gpu
def test_match_full_var():
x = relay.Var("x")
v = relay.Var("v")
id_func = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternVar(v), v)]))
res1 = eval(id_func(nil()))
res2 = eval(id_func(cons(z(), cons(z(), nil()))))
empty = to_list(res1)
assert len(empty) == 0
zeroes = to_list(res2)
assert len(zeroes) == 2
assert count(zeroes[0]) == 0
assert count(zeroes[1]) == 0
@tvm.testing.uses_gpu
def test_nested_pattern_match():
x = relay.Var("x", rlist(nat()))
h1 = relay.Var("h1")
h2 = relay.Var("h2")
t = relay.Var("t")
match = relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternVar(h1),
relay.PatternConstructor(cons, [relay.PatternVar(h2), relay.PatternVar(t)]),
],
),
h2,
),
relay.Clause(relay.PatternWildcard(), z()),
],
)
get_second = relay.Function([x], match)
res = eval(get_second(cons(s(z()), cons(s(s(z())), nil()))))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_compose():
n = relay.Var("n")
inc = relay.Function([n], s(n))
x = relay.Var("x")
res = eval(relay.Call(compose(inc, double), [s(s(z()))]))
assert count(res) == 5
@tvm.testing.uses_gpu
def test_iterate():
expr = relay.Call(iterate(double, relay.const(2)), [make_nat_expr(prelude, 3)])
res = eval(relay.Function([], expr)())
assert count(res) == 12
if __name__ == "__main__":
pytest.main([__file__])
import pytest
import tvm
from tvm import relay
from tvm.relay.analysis import check_basic_block_normal_form
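# check_basic_block_normal_form raises a TVMError when an expression is used in
# more than one scope without being let-bound in a scope that dominates its uses.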
def test_one_block():
x = relay.var("x")
y = relay.add(x, x)
z = relay.add(x, y)
check_basic_block_normal_form(z)
def test_let():
x = relay.var("x")
y = relay.var("y")
body = relay.Let(y, x, y)
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_if():
cond = relay.var("cond", dtype="bool", shape=())
shared = relay.var("shared")
true_branch = shared
false_branch = relay.add(shared, shared)
body = relay.If(cond, true_branch, false_branch)
"""
The program below violates basic block normal form: the scope of %shared is
ambiguous and should not be confined to that of the true branch.
free_var %cond: bool
if (%cond) {
free_var %shared
%shared
} else {
add(%shared, %shared)
}
"""
check_basic_block_normal_form(body)
def test_valid_if():
cond = relay.var("cond", dtype="bool", shape=())
shared = relay.var("shared")
true_branch = shared
false_branch = relay.add(shared, shared)
body = relay.If(cond, true_branch, false_branch)
shared_bound = relay.var("shared_bound", shape=(1,), dtype="float32")
body = relay.Let(shared, shared_bound, body)
"""
The program below uses let binding to control the scope of %shared, which
follows the basic block normal form.
free_var %shared_bound: Tensor[(1), float32]
let %shared = %shared_bound;
free_var %cond: bool
if (%cond) {
%shared
} else {
add(%shared, %shared)
}
"""
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_if2():
"""
fn (%x: float32) {
%0 = equal(%x, 2f);
if (%0) {
%1 = add(%x, 1f);
multiply(%1, 2f)
} else {
multiply(%1, 1f)
}
}
"""
x = relay.var("x", shape=(), dtype="float32")
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
v1 = relay.add(x, one)
v2 = relay.equal(x, two)
true_branch = relay.multiply(v1, two)
false_branch = relay.multiply(v1, one)
body = relay.If(v2, true_branch, false_branch)
func = relay.Function([x], body)
check_basic_block_normal_form(func)
def test_valid_if2():
"""
fn (%x: float32) {
let %v1 = add(%x, 1f);
%0 = equal(%x, 2f);
if (%0) {
multiply(%v1, 2f)
} else {
multiply(%v1, 1f)
}
}
"""
x = relay.var("x", shape=(), dtype="float32")
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
v1 = relay.var("v1")
v2 = relay.equal(x, two)
true_branch = relay.multiply(v1, two)
false_branch = relay.multiply(v1, one)
body = relay.If(v2, true_branch, false_branch)
body = relay.Let(v1, relay.add(x, one), body)
func = relay.Function([x], body)
check_basic_block_normal_form(func)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func():
x = relay.var("x", shape=(1,), dtype="float32")
y = relay.var("y", shape=(1,), dtype="float32")
z = relay.var("z", shape=(1,), dtype="float32")
x2 = relay.add(x, x)
func_a = relay.Function([y], relay.add(x2, y))
func_b = relay.Function([z], relay.add(x2, z))
body = relay.Tuple([func_a, func_b])
body = relay.Function([x], body)
"""
fn (%x: Tensor[(1), float32]) {
%1 = fn (%y: Tensor[(1), float32]) {
%0 = add(%x, %x);
add(%0, %y)
};
%2 = fn (%z: Tensor[(1), float32]) {
add(%0, %z)
};
(%1, %2)
}
"""
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_higher_order_return():
x = relay.var("x", shape=(1,), dtype="float32")
y = relay.var("y", shape=(1,), dtype="float32")
z = relay.var("z", shape=(1,), dtype="float32")
x2 = relay.add(x, x)
func_a = relay.Function([y], relay.add(x2, y))
func_b = relay.Function([z], relay.add(x2, z))
body = relay.Tuple([func_a, func_b])
body = relay.Function([x], body)
"""
fn (%x: Tensor[(1), float32]) {
%1 = fn (%y: Tensor[(1), float32]) {
%0 = add(%x, %x);
add(%0, %y)
};
%2 = fn (%z: Tensor[(1), float32]) {
add(%0, %z)
};
(%1, %2)
}
"""
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_higher_order_nested():
x = relay.var("x", dtype="float32", shape=(1,))
s = relay.var("s", dtype="float32", shape=(1,))
shared = relay.add(s, s)
func_true = relay.Function([x], relay.add(x, shared))
choice_t = relay.FuncType([], relay.scalar_type("bool"))
f = relay.Var("f", choice_t)
z = relay.Var("z")
body = relay.If(f(), func_true, relay.Function([z], relay.add(z, shared)))
top = relay.Function([f, s], body)
"""
fn (%f: fn () -> bool, %s: Tensor[(1), float32]) {
%0 = %f();
if (%0) {
fn (%x: Tensor[(1), float32]) {
%1 = add(%s, %s);
add(%x, %1)
}
} else {
fn (%z) {
add(%z, %1)
}
}
}
"""
check_basic_block_normal_form(top)
if __name__ == "__main__":
pytest.main([__file__])
"""Test fake quantized op extraction"""
import tvm
from tvm import relay
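# list_fake_quantized_op_freqs counts the operators that fall inside fake-quantized
# regions, i.e. between dequantized inputs and a final quantize; ops outside such a
# region (like the trailing sigmoid below) are not counted.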
def test_fake_quantize_conv():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
kernel_size=[5, 5],
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.conv2d": 1}
def test_fake_quantize_dense():
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.dense": 1}
def test_fake_quantize_multiple_regions():
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
op = relay.qnn.op.dequantize(op, relay.const(2.0), relay.const(114))
op = relay.op.nn.relu(op)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
w2 = relay.var("w2", shape=[64, 256], dtype="int8")
op = relay.op.nn.dense(
relay.qnn.op.dequantize(op, relay.const(1.0), zero),
relay.qnn.op.dequantize(w2, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
op = relay.op.sigmoid(op)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.dense": 2, "nn.relu": 1}
def test_fake_quantize_maxpool():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.nn.max_pool2d(x, [3, 3])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.max_pool2d": 1}
def test_fake_quantize_transpose_reshape():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.transpose(x, [1, 0, 2, 3])
op = relay.op.reshape(op, [3, -1])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"transpose": 1, "reshape": 1}
def test_fake_quantize_concat():
zero = relay.const(0)
inputs = []
for i in range(4):
inputs.append(
relay.qnn.op.dequantize(
relay.var("x%d" % i, shape=[1, 4], dtype="int8"), relay.const(i + 0.5), zero
)
)
concat = relay.op.concatenate(inputs, axis=1)
op = relay.qnn.op.quantize(concat, relay.const(3.5), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"concatenate": 1}
"""Test function extraction"""
import tvm
from tvm import relay
from tvm.relay.testing.synthetic import get_workload
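# extract_fused_functions collects the fused primitive functions contained in the
# module; the tests below check how many distinct fused functions each graph
# yields and what they compute.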
def get_conv_net():
"""This gets the net for a case described in fuse_ops.cc:
conv2d
/ | \
/ | \
op op op
\ | /
\ | /
elemwise add
|
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x2 = relay.nn.conv2d(y, relay.var("w3"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x3 = relay.nn.conv2d(y, relay.var("w4"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(x1, x2)
z = relay.add(x3, z)
return tvm.IRModule.from_expr(z)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract_identity():
mod = get_conv2d()
items = relay.analysis.extract_fused_functions(mod)
assert len(items) == 1
mod["main"] = mod["main"].with_attr("Primitive", tvm.tir.IntImm("int32", 1))
tvm.ir.structural_equal(list(items.values())[0], mod["main"])
def test_extract_conv_net():
mod = get_conv_net()
items = relay.analysis.extract_fused_functions(mod)
functions = list(items.values())
assert len(functions) == 2
x = functions[0]
y = functions[1]
def is_conv(func):
conv2d = relay.op.op.get("nn.conv2d")
call_node = func.body
return call_node.op == conv2d
def is_conv_add(func):
add = relay.op.op.get("add")
call_node = func.body
maybe_conv_module = tvm.IRModule.from_expr(call_node.args[0])
return call_node.op == add and is_conv(maybe_conv_module["main"])
assert (is_conv(x) and is_conv_add(y)) or (is_conv_add(x) and is_conv(y))
def test_extract_resnet():
mod, _params = get_workload()
items = relay.analysis.extract_fused_functions(mod)
assert len(items) == 7
if __name__ == "__main__":
test_extract_identity()
test_extract_conv_net()
test_extract_resnet()
"""Test intermediate expression extraction"""
import pytest
import tvm
from tvm import relay
def get_conv_net():
"""This gets the net for:
conv2d
/ |
/ |
conv2d |
\ |
\ |
elemwise add
|
|
|
split
|
|
|
elemwise add
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
tuple_0_add = relay.add(tuple_out[0], relay.const(1, dtype="float32"))
return tvm.IRModule.from_expr(tuple_0_add)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract():
dshape = (1, 1, 5, 1)
def before():
return get_conv_net()
def expected_0():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(y)
def expected_1():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(x1)
def expected_2():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
return tvm.IRModule.from_expr(z)
def expected_3():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
return tvm.IRModule.from_expr(tuple_out.astuple())
def expected_4():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
return tvm.IRModule.from_expr(tuple_out[0])
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 0), expected_0()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 1), expected_1()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 2), expected_2()
)
assert tvm.ir.structural_equal(
(relay.analysis.extract_intermdeiate_expr(before(), 3)), expected_3()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 4), expected_4()
)
assert tvm.ir.structural_equal(relay.analysis.extract_intermdeiate_expr(before(), 5), before())
if __name__ == "__main__":
pytest.main([__file__])
"""Test operator extraction"""
import pytest
import tvm
from tvm import relay
from tvm.relay.testing.resnet import get_workload
from tvm.relay.testing import run_opt_pass
def get_conv_net():
"""This gets the net for:
conv2d
/ |
/ |
conv2d |
\ |
\ |
elemwise add
|
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
return tvm.IRModule.from_expr(z)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract_identity():
mod = get_conv2d()
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == 1
assert op_freqs["nn.conv2d"] == 1
def test_extract_conv_net():
mod = get_conv_net()
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == 2
assert op_freqs["add"] == 1
assert op_freqs["nn.conv2d"] == 2
def test_extract_fused():
mod = get_conv_net()
mod = relay.transform.InferType()(mod)
mod = relay.transform.FuseOps(3)(mod)
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == 2
assert op_freqs["add"] == 1
assert op_freqs["nn.conv2d"] == 2
def test_extract_resnet():
mod, _params = get_workload()
expected_op_freqs = {
"nn.batch_norm": 19,
"nn.conv2d": 21,
"nn.relu": 18,
"nn.max_pool2d": 1,
"add": 8,
"nn.global_avg_pool2d": 1,
"nn.batch_flatten": 1,
"nn.dense": 1,
"nn.bias_add": 1,
"nn.softmax": 1,
}
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == len(expected_op_freqs)
assert all([op_freqs[op] == expected_op_freqs[op] for op in expected_op_freqs])
if __name__ == "__main__":
pytest.main([__file__])
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature, Feature
from tvm.relay.transform import gradient
from tvm.relay.prelude import Prelude
from tvm.relay.testing import run_infer_type
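# detect_feature reports the set of Relay language features (variables, calls,
# let bindings, references, pattern matching, ...) used by an expression or module.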
def test_prelude():
p = Prelude()
feats = detect_feature(p.mod)
assert feats == set(
[
Feature.fVar,
Feature.fGlobalVar,
Feature.fConstant,
Feature.fTuple,
Feature.fTupleGetItem,
Feature.fFunction,
Feature.fOp,
Feature.fCall,
Feature.fLet,
Feature.fIf,
Feature.fConstructor,
Feature.fMatch,
]
)
def test_ad():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x + x)
func = run_infer_type(func)
mod = tvm.IRModule.from_expr(gradient(func))
mod = relay.transform.InferType()(mod)
back_func = mod["main"]
feats = detect_feature(back_func)
assert feats == set(
[
Feature.fVar,
Feature.fTuple,
Feature.fTupleGetItem,
Feature.fFunction,
Feature.fOp,
Feature.fCall,
Feature.fLet,
Feature.fRefCreate,
Feature.fRefRead,
Feature.fRefWrite,
]
)
if __name__ == "__main__":
test_prelude()
test_ad()
import numpy as np
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.analysis import get_calibration_data
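# get_calibration_data runs the partitioned module on the given inputs and records,
# for every non-main (partitioned) function, the tensors flowing in and out of it.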
def check_data_size(mod, data):
assert len(data) == len(mod.functions) - 1
for key, value in mod.functions.items():
if key.name_hint != "main":
assert len(data[key]["inputs"]) == len(value.params)
if isinstance(value.body, relay.Tuple):
assert len(data[key]["outputs"]) == len(value.body.fields)
else:
assert len(data[key]["outputs"]) == 1
def test_simple_graph():
mod = tvm.IRModule()
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z0 = x0 + y0
z1 = x0 - y0
z2 = relay.Tuple((z0, z1))
f0 = relay.Function([x0, y0], z2)
f0 = f0.with_attr("Compiler", "test_graph")
g0 = relay.GlobalVar("g0")
mod[g0] = f0
mod = relay.transform.InferType()(mod)
x1 = relay.var("x1", shape=(8, 8))
y1 = relay.var("y1", shape=(8, 8))
z1 = x1 - y1
f1 = relay.Function([x1, y1], z1)
f1 = f1.with_attr("Compiler", "test_graph")
g1 = relay.GlobalVar("g1")
mod[g1] = f1
mod = relay.transform.InferType()(mod)
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
z = relay.var("z", shape=(8, 8))
c0 = relay.Call(g0, [x, y])
c1 = relay.Call(g1, [relay.TupleGetItem(c0, 0), z])
fm = relay.Function([x, y, z], c1)
mod["main"] = fm
mod = relay.transform.InferType()(mod)
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
z_data = np.random.rand(8, 8).astype("float32")
data = get_calibration_data(mod, {"x": x_data, "y": y_data, "z": z_data})
check_data_size(mod, data)
tvm.testing.assert_allclose(data[g0]["inputs"][0].numpy(), x_data)
tvm.testing.assert_allclose(data[g0]["inputs"][1].numpy(), y_data)
tvm.testing.assert_allclose(data[g0]["outputs"][0].numpy(), x_data + y_data)
tvm.testing.assert_allclose(data[g0]["outputs"][1].numpy(), x_data - y_data)
tvm.testing.assert_allclose(data[g1]["inputs"][0].numpy(), x_data + y_data)
tvm.testing.assert_allclose(data[g1]["inputs"][1].numpy(), z_data)
tvm.testing.assert_allclose(data[g1]["outputs"][0].numpy(), x_data + y_data - z_data)
def test_mobilenet_dnnl():
if not tvm.get_global_func("relay.ext.dnnl", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 3, 224, 224)
mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
mod = transform.AnnotateTarget(["dnnl"])(mod)
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
data = get_calibration_data(mod, {"data": i_data, **params})
check_data_size(mod, data)
if __name__ == "__main__":
test_simple_graph()
test_mobilenet_dnnl()
import tvm
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
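# AnnotatedRegionSet groups the expressions between compiler_begin/compiler_end
# annotations into regions; check_region asserts a region's target, arguments,
# member nodes, and return expressions.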
def check_region(region_set, target, args, nodes, rets):
region = region_set.get_region(args[0])
assert region
assert target == region.target
assert set(args) == set(region.args)
assert set(nodes) == set(region.nodes)
assert set(rets) == set(region.rets)
def test_region_set_creator_diamond():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test_target")
O_1 = relay.abs(cb_1)
ce_1 = compiler_end(O_1, "test_target")
ce_2 = compiler_end(O_1, "test_target")
cb_2 = compiler_begin(ce_1, "test_target")
O_2 = relay.nn.relu(cb_2)
ce_3 = compiler_end(O_2, "test_target")
cb_d = compiler_begin(ce_2, "default")
X = relay.tanh(cb_d)
ce_d = compiler_end(X, "default")
cb_3 = compiler_begin(ce_3, "test_target")
cb_4 = compiler_begin(ce_d, "test_target")
O_3 = relay.add(cb_3, cb_4)
ce_4 = compiler_end(O_3, "test_target")
diamond = relay.Function([data], ce_4)
region_set = relay.analysis.AnnotatedRegionSet(
diamond, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
)
assert len(region_set) == 4
check_region(
region_set,
"test_target",
[cb_1],
[cb_1, O_1, ce_1, ce_2],
[ce_1, ce_2],
)
check_region(
region_set,
"test_target",
[cb_2],
[cb_2, O_2, ce_3],
[ce_3],
)
check_region(
region_set,
"default",
[cb_d],
[cb_d, X, ce_d],
[ce_d],
)
check_region(
region_set,
"test_target",
[cb_3, cb_4],
[cb_3, cb_4, O_3, ce_4],
[ce_4],
)
def test_region_set_creator_merged():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test_target")
O_1 = relay.abs(cb_1)
ce_2 = compiler_end(O_1, "test_target")
O_2 = relay.nn.relu(O_1)
ce_3 = compiler_end(O_2, "test_target")
cb_d = compiler_begin(ce_2, "default")
X = relay.tanh(cb_d)
ce_d = compiler_end(X, "default")
cb_3 = compiler_begin(ce_3, "test_target")
cb_4 = compiler_begin(ce_d, "test_target")
O_3 = relay.add(cb_3, cb_4)
O_4 = relay.add(cb_3, cb_4)
O_5 = relay.Tuple([O_3, O_4])
ce_4 = compiler_end(O_5, "test_target")
merged = relay.Function([data], ce_4)
region_set = relay.analysis.AnnotatedRegionSet(
merged, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
)
assert len(region_set) == 3
check_region(
region_set,
"test_target",
[cb_1],
[cb_1, O_1, O_2, ce_2, ce_3],
[ce_2, ce_3],
)
check_region(
region_set,
"default",
[cb_d],
[cb_d, X, ce_d],
[ce_d],
)
check_region(
region_set,
"test_target",
[cb_3, cb_4],
[cb_3, cb_4, O_3, O_4, O_5, ce_4],
[ce_4],
)
if __name__ == "__main__":
test_region_set_creator_diamond()
test_region_set_creator_merged()
import os
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay.loops import while_loop
from tvm.relay.testing import run_infer_type as infer_type
from tvm.topi.testing import searchsorted_ref
from utils import ref_funcs
from utils.assert_diagnostic import DiagnosticTesting
def int32(val):
return relay.const(val, "int32")
def any_dims(ndim):
shape = []
for _ in range(ndim):
shape.append(relay.Any())
return tuple(shape)
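# check_result evaluates the module with both the debug interpreter and the VM on
# every enabled target and compares results against `expected` (or, when
# assert_shape=True, compares only the output shapes).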
def check_result(
args,
mod,
expected,
flatten=False,
assert_shape=False,
only_vm=False,
targets=None,
disable_targets=None,
):
if not isinstance(expected, list):
expected = [expected]
for kind in ["debug", "vm"]:
targets = targets or tvm.testing.enabled_targets()
for tgt, dev in targets:
if disable_targets and tgt in disable_targets:
continue
if kind == "debug" and (only_vm or dev.device_type != tvm.cpu().device_type):
continue
result = relay.create_executor(kind, mod=mod, device=dev, target=tgt).evaluate()(*args)
if isinstance(result, tvm.runtime.container.ADT):
result = [r.numpy() for r in result]
else:
result = [result.numpy()]
for r, e in zip(result, expected):
if assert_shape:
assert r.shape == e, "Shape mismatch: expect %s but got %s." % (
str(e),
str(r),
)
else:
if flatten:
r = r.flatten()
e = e.flatten()
tvm.testing.assert_allclose(r, e, atol=2e-6)
def verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
dtype = "float32"
x = relay.var("x", shape=x_shape, dtype=dtype)
y = relay.var("y", shape=y_shape, dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], op(x, y))
x_np = np.random.uniform(size=x_np_shape).astype(dtype)
y_np = np.random.uniform(size=y_np_shape).astype(dtype)
res_np = np_op(x_np, y_np)
check_result([x_np, y_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_broadcast():
verify_any_broadcast((relay.Any(),), (3, 2), (1,), (3, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (1, 2), (1, 2), (1, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (1, 2), (3, 2), (1, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (3, 2), (1, 2), (3, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (3, relay.Any()), (1, 2), (3, 1), relay.add, np.add)
verify_any_broadcast((relay.Any(),), (3, 2), (2,), (3, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (3, 2), (3, 2), (3, 2), relay.add, np.add)
def verify_any_elemwise(x_shape, x_np_shape, op, np_op):
dtype = "float32"
x = relay.var("x", shape=x_shape, dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], op(x))
x_np = np.random.uniform(size=x_np_shape).astype(dtype)
res_np = np_op(x_np)
check_result([x_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_elemwise():
verify_any_elemwise((relay.Any(),), (3,), relay.sqrt, np.sqrt)
verify_any_elemwise((relay.Any(), 2), (5, 2), relay.negative, np.negative)
verify_any_elemwise((relay.Any(), relay.Any()), (5, 4), relay.exp, np.exp)
verify_any_elemwise((relay.Any(),), (3,), relay.round, np.round)
@tvm.testing.uses_gpu
def test_any_broadcast_fail():
def check_fail(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
try:
verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op)
except tvm._ffi.base.TVMError:
pass
else:
assert False
check_fail((relay.Any(),), (3, 2), (1,), (4, 2), relay.add, np.add)
check_fail((relay.Any(), 2), (3, 2), (4, 2), (4, 2), relay.add, np.add)
check_fail((relay.Any(), 2), (3, relay.Any()), (1, 2), (4, 1), relay.add, np.add)
check_fail((relay.Any(), 2), (3, 3), (1, 3), (3, 3), relay.add, np.add)
check_fail((relay.Any(),), (3, 2), (2), (4, 2), relay.add, np.add)
def verify_any_full_like(x_shape, x_np_shape, relay_op, np_op, dtype="float32"):
x = relay.var("x", shape=x_shape, dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay_op(x))
x_np = np.random.uniform(size=x_np_shape).astype(dtype)
res_np = np_op(x_np)
check_result([x_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_full_like():
verify_any_full_like(any_dims(3), (2, 3, 5), relay.zeros_like, np.zeros_like, "float32")
verify_any_full_like(any_dims(3), (225, 115, 15), relay.zeros_like, np.zeros_like, "float32")
verify_any_full_like(
any_dims(5), (10, 11, 12, 13, 14), relay.zeros_like, np.zeros_like, "int32"
)
verify_any_full_like(any_dims(3), (2, 3, 5), relay.ones_like, np.ones_like, "float32")
verify_any_full_like(any_dims(3), (225, 115, 15), relay.ones_like, np.ones_like, "float32")
verify_any_full_like(any_dims(5), (10, 11, 12, 13, 14), relay.ones_like, np.ones_like, "int32")
def verify_any_full(x_np_shape, relay_op, np_op, dtype="float32", value=None):
x = relay.var("x", shape=(len(x_np_shape),), dtype="int32")
mod = tvm.IRModule()
out = relay_op(x, dtype) if value is None else relay_op(relay.expr.const(value), x, dtype)
mod["main"] = relay.Function([x], out)
res_np = np_op(x_np_shape) if value is None else np_op(x_np_shape, value)
x_np = np.array(x_np_shape).astype("int32")
check_result([x_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_full():
verify_any_full((2, 3, 5), relay.zeros, np.zeros, "float32")
verify_any_full((225, 115, 15), relay.zeros, np.zeros, "float32")
verify_any_full((10, 11, 12, 13, 14), relay.zeros, np.zeros, "int32")
verify_any_full((2, 3, 5), relay.ones, np.ones, "float32")
verify_any_full((225, 115, 15), relay.ones, np.ones, "float32")
verify_any_full((10, 11, 12, 13, 14), relay.ones, np.ones, "int32")
verify_any_full((10, 11, 12, 13, 14), relay.full, np.full, "float32", 2.0)
verify_any_full((1, 2, 3, 4), relay.full, np.full, "int32", -2)
@tvm.testing.uses_gpu
def test_any_concat():
x = relay.var("x", shape=(rela |
y.Any(), 2), dtype="float32")
y = relay.var("y", shape=(1, 2), dtype="float32")
xx = x - relay.expr.const(3.0)
yy = y * relay.expr.const(5.0)
z = relay.op.concatenate([xx, yy], axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.uniform(size=(3, 2)).astype("float32")
y_np = np.random.uniform(size=(1, 2)).astype("float32")
ref = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
check_result([x_np, y_np], mod, ref)
num_inputs = 25
x = [relay.var("x", shape=(relay.Any(),), dtype="float32") for _ in range(num_inputs)]
z = relay.op.concatenate(x, axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function(x, z)
x_np = [np.random.uniform(size=(1,)).astype("float32") for _ in range(num_inputs)]
ref = np.concatenate(x_np, axis=0)
check_result(x_np, mod, ref)
def test_oshape(in_vars, axis, oshape):
z = relay.op.concatenate(in_vars, axis=axis)
mod = tvm.IRModule()
mod["main"] = relay.Function(in_vars, z)
typed_mod = relay.transform.InferType()(mod)
assert typed_mod["main"].body.checked_type == relay.TensorType(oshape, dtype="float32")
x = [relay.var("x", shape=(relay.Any(), 3), dtype="float32") for _ in range(3)]
x.append(relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32"))
test_oshape(x, 0, (relay.Any(), 3))
test_oshape(x, 1, (relay.Any(), relay.Any()))
x = [
relay.var("x", shape=(1, 3), dtype="float32"),
relay.var("x", shape=(1, relay.Any()), dtype="float32"),
]
test_oshape(x, 0, (2, relay.Any()))
test_oshape(x, 1, (1, relay.Any()))
def verify_any_reshape(x_shape, newshape, x_np_shape, out_shape, variable_newshape=False):
x = relay.var("x", shape=x_shape, dtype="float32")
relu_x = relay.nn.relu(x)
data = np.random.uniform(size=x_np_shape).astype("float32")
expected = data.reshape(out_shape)
params = [x]
args = [data]
if variable_newshape:
newshape_var = relay.var("new |
shape", shape=(len(newshape),), dtype="int64")
params.append(newshape_var)
args.append(np.array(newshape, dtype="int64"))
newshape = newshape_var
y = relay.reshape(relu_x, newshape=newshape)
mod = tvm.IRModule()
mod["main"] = relay.Function(params, y)
check_result(args, mod, expected)
@tvm.testing.uses_gpu
def test_any_reshape():
for variable_newshape in [False, True]:
verify_any_reshape(any_dims(3), (1, -1), (2, 3, 4), (1, 24), variable_newshape)
verify_any_reshape(any_dims(3), (0, -1), (2, 3, 4), (2, 12), variable_newshape)
verify_any_reshape(any_dims(3), (0, -2), (2, 3, 4), (2, 3, 4))
verify_any_reshape(any_dims(3), (-4, -1, 2, -3), (6, 3, 4), (3, 2, 12))
verify_any_reshape(any_dims(3), (-4, 2, -1, -2), (6, 3, 4), (2, 3, 3, 4))
verify_any_reshape(any_dims(3), (1, -1, 0), (2, 3, 4), (1, 6, 4))
verify_any_reshape(any_dims(3), (-1, 1, 0), (2, 3, 4), (6, 1, 4))
def verify_any_one_hot(indices_shape, indices_np_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", shape=indices_shape, dtype="int32")
on_value_const = relay.const(on_value, dtype)
off_value_const = relay.const(off_value, dtype)
y = relay.one_hot(indices, on_value_const, off_value_const, depth, axis=axis, dtype=dtype)
params = [indices]
mod = tvm.IRModule()
mod["main"] = relay.Function(params, y)
indices_npy = np.random.randint(0, depth, size=indices_np_shape).astype("int32")
out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
args = [indices_npy]
check_result(args, mod, out_npy)
@tvm.testing.uses_gpu
def test_any_one_hot():
verify_any_one_hot(any_dims(1), (3,), 3, 1, 0, -1, "int32")
verify_any_one_hot(any_dims(2), (2, 2), 5, 0.5, -0.5, 1, "float32")
verify_any_one_hot(any_dims(4), (3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
def verify_any_argwhere(x_shape, x_np_shape, dtype="bool"):
x = relay.var("x", shape=x_shape, dtype=dtype)
y = relay.argwhere(x)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y)
data = np.random.choice([0, 1, 2, 3], size=x_np_shape).astype(dtype)
expected = np.argwhere(data)
check_result([data], mod, expected, flatten=True)
@tvm.testing.uses_gpu
def test_any_argwhere():
verify_any_argwhere(any_dims(1), (5,))
verify_any_argwhere(any_dims(2), (5, 5))
verify_any_argwhere(any_dims(2), (5, 5), "int32")
verify_any_argwhere(any_dims(2), (5, 5), "int8")
verify_any_argwhere(any_dims(3), (5, 5, 5))
verify_any_argwhere(any_dims(4), (5, 5, 5, 5))
verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5))
verify_any_argwhere(any_dims(1), (5,), "int32")
verify_any_argwhere(any_dims(3), (5, 5, 5), "int32")
verify_any_argwhere(any_dims(4), (5, 5, 5, 5), "int32")
verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5), "int32")
verify_any_argwhere(any_dims(1), (5,), "int8")
verify_any_argwhere(any_dims(3), (5, 5, 5), "int8")
verify_any_argwhere(any_dims(4), (5, 5, 5, 5), "int8")
verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5), "int8")
def verify_any_take(data_shape, indices_shape, axis, data_np_shape, indices_np_shape):
mod = tvm.IRModule()
data = relay.var("data", shape=data_shape, dtype="float32")
indices = relay.var("indices", shape=indices_shape, dtype="int32")
y = relay.take(data, indices, axis=axis)
mod["main"] = relay.Function([data, indices], y)
data_np = np.random.uniform(size=data_np_shape).astype("float32")
if axis is None:
max_index = data_np.size
else:
max_index = data_np.shape[axis]
indices_np = np.random.randint(max_index, size=indices_np_shape).astype("int32")
ref = np.take(data_np, indices_np, axis=axis)
check_result([data_np, indices_np], mod, ref)
@tvm.testing.uses_gpu
def test_any_take():
verify_any_take(any_dims(2), (1,), 0, (4, 5), (1,))
verify_any_take(any_dims(2), (), 0, (4, 5), ())
verify_any_take(any_dims(2), (), None, (4, 5), ())
verify_any_take(any_dims(3), any_dims(2), 1, (3, 4, 5), (2, 3))
verify_any_take(any_dims(2), any_dims(3), None, (4, 5), (2, 3, 4))
verify_any_take(any_dims(2), any_dims(4), -1, (4, 5), (2, 3, 4, 5))
def verify_any_tile(dshape, reps, np_dshape, np_reps):
mod = tvm.IRModule()
x = relay.var("x", shape=dshape, dtype="float32")
y = relay.tile(x, reps=reps)
mod["main"] = relay.Function([x], y)
x_data = np.random.uniform(size=np_dshape).astype("float32")
ref_res = np.tile(x_data, reps=np_reps)
check_result([x_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_tile():
verify_any_tile(any_dims(3), (3, 2, 1), (2, 3, 4), (3, 2, 1))
verify_any_tile(any_dims(3), (1, 2), (2, 3, 4), (1, 2))
verify_any_tile(any_dims(2), (3, 2, 1), (2, 3), (3, 2, 1))
verify_any_tile(any_dims(3), (1,), (2, 3, 4), (1,))
@tvm.testing.uses_gpu
def test_any_shape_of():
x = relay.var("x", shape=any_dims(2), dtype="float32")
y = relay.shape_of(x)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y)
data = np.random.uniform(size=(3, 4)).astype("float32")
check_result([data], mod, np.array([3, 4]).astype("int64"))
x = relay.var("x", shape=any_dims(3), dtype="float32")
y0 = relay.shape_of(x)
y1 = relay.take(y0, relay.const(1, "int32"))
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y1)
data = np.random.uniform(size=(2, 3, 4)).astype("float32")
check_result([data], mod, np.array(3).astype("int64"))
class TestAnyReduce:
config = {
"argmax": (relay.argmax, any_dims(3), None, False, False, (3, 4, 5), ()),
"argmin": (relay.argmin, any_dims(4), 1, False, True, (3, 4, 5, 6), (3, 1, 5, 6)),
"all": (relay.all, any_dims(3), (1, 2), True, False, (3, 4, 5), (4, 5)),
"max": (relay.max, any_dims(4), -1, True, True, (3, 4, 5, 6), (1, 1, 1, 6)),
"min": (relay.min, any_dims(3), (0, 1), False, False, (4, 5, 6), (6,)),
"prod": (relay.prod, any_dims(4), 2, True, True, (3, 4, 5, 6), (1, 1, 5, 1)),
"mean": (relay.mean, |
any_dims(2), 0, False, False, (1, 2), (2,)),
"variance": (relay.variance, any_dims(5), (2, 4), False, False, (3, 4, 5, 6, 7), (3, 4, 6)),
}
(
reduce_op,
data_shape,
axis,
exclude,
keepdims,
static_data_shape,
ref_out_shape,
) = tvm.testing.parameters(*config.values(), ids=config.keys())
def test_any_reduce(
self,
target,
dev,
reduce_op,
data_shape,
axis,
exclude,
keepdims,
static_data_shape,
ref_out_shape,
):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and reduce_op == relay.all:
pytest.xfail("Known failing test case for vulkan runtime")
mod = tvm.IRModule()
dtype = "bool" if reduce_op == relay.all else "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = reduce_op(data, axis, keepdims, exclude)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)])
def verify_any_layout_transform(
data_shape, src_layout, dst_layout, static_data_shape, ref_out_shape
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.layout_transform(data, src_layout, dst_layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_layout_transform():
verify_any_layout_transform(any_dims(4), "NCHW", "NHWC", (3, 4, 5, 6), (3, 5, 6, 4))
verify_any_layout_transform(
any_dims(5), "NCHW16c", "NCHW2c", (1, 2, 8, 8, 16), (1, 16, 8, 8, 2)
)
verify_any_layout_transform(any_dims(5), "NCHW6n", "NHWC", (3, 4, 5, 6, 6), (18, 5, 6, 4))
verify_any_layout_transform(any_dims(4), "NCHW", "NCHW4c", (3, 4, 5, 6), (3, 1, 5, 6, 4))
verify_any_layout_transform((16, 1), "CH", "C4cH", (16, 1), (4, 4, 1))
def verify_any_expand_dims(data_shape, axis, num_newaxis, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.expand_dims(data, axis=axis, num_newaxis=num_newaxis)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_expand_dims():
verify_any_expand_dims(any_dims(3), 1, 2, (1, 2, 3), (1, 1, 1, 2, 3))
verify_any_expand_dims(any_dims(3), -1, 2, (1, 2, 3), (1, 2, 3, 1, 1))
def verify_any_transpose(data_shape, axes, static_data_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.transpose(data, axes=axes)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out = np.transpose(data_np, axes)
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_transpose():
verify_any_transpose(any_dims(3), (1, 0, 2), (10, 3, 2))
verify_any_transpose(any_dims(3), None, (2, 3, 4))
verify_any_transpose(any_dims(6), (0, 1, 3, 2, 5, 4), (11, 12, 2, 1, 9, 17))
verify_any_transpose(any_dims(2), (-1, 0), (3, 2))
def verify_any_squeeze(data_shape, axis, static_data_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.squeeze(data, axis=axis)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out = np.squeeze(data_np, axis)
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_squeeze():
verify_any_squeeze((relay.Any(), relay.Any(), relay.Any()), (0,), (1, 9, 8))
verify_any_squeeze((1, relay.Any(), relay.Any()), (0,), (1, 9, 8))
verify_any_squeeze(
(1, relay.Any(), relay.Any(), 1, relay.Any(), relay.Any()), (0, 3), (1, 12, 2, 1, 9, 17)
)
@tvm.testing.uses_gpu
def test_any_reshape_like():
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=(relay.Any(), 3, 10), dtype=dtype)
shape_like = relay.var("data", shape=(relay.Any(), 5, 6), dtype=dtype)
y = relay.reshape_like(data, shape_like)
mod["main"] = relay.Function([data, shape_like], y)
data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
shape_like_np = np.random.uniform(size=(3, 5, 6)).astype(dtype)
check_result([data_np, shape_like_np], mod, shape_like_np.shape, assert_shape=True)
def verify_any_conv2d(
data_shape,
kernel_shape,
strides,
padding,
dilation,
static_data_shape,
ref_out_shape,
data_layout="NCHW",
kernel_layout="OIHW",
use_cudnn=False,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv2d(
data,
kernel,
strides,
padding,
dilation,
kernel_size=kernel_shape[2:4] if kernel_layout == "OIHW" else kernel_shape[0:2],
data_layout=data_layout,
kernel_layout=kernel_layout,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
targets = None
if use_cudnn and tvm.get_global_func("tvm.contrib.cudnn.conv2d.forward", True):
targets = [("cuda -libs=cudnn", tvm.cuda(0))]
check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=targets)
@tvm.testing.uses_gpu
def test_any_conv2d():
verify_any_conv2d(
(relay.Any(), 64, 224, 224),
(64, 64, 3, 3),
(1, 1),
(1, 1),
(1, 1),
(1, 64, 224, 224),
(1, 64, 224, 224),
)
verify_any_conv2d(
(relay.Any(), 64, 224, 224),
(64, 64, 3, 3),
(1, 1),
(1, 1),
(2, 2),
(2, 64, 224, 224),
(2, 64, 222, 222),
)
verify_any_conv2d(
(relay.Any(), 64, 224, 224),
(64, 64, 3, 3),
(1, 1),
(1, 1),
(1, 1),
(1, 64, 224, 224),
(1, 64, 224, 224),
use_cudnn=True,
)
verify_any_conv2d(
(relay.Any(), 224, 224, 64),
(3, 3, 64, 64),
(1, 1),
(1, 1),
(1, 1),
(1, 224, 224, 64),
(1, 224, 224, 64),
data_layout="NHWC",
kernel_layout="HWIO",
)
verify_any_conv2d(
(relay.Any(), 224, 224, 64),
(3, 3, 64, 64),
(1, 1),
(1, 1),
(2, 2),
(2, 224, 224, 64),
(2, 222, 222, 64),
data_layout="NHWC",
kernel_layout="HWIO",
)
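# Parameterized test for contrib_conv2d_nchwc with a dynamic batch dimension;
# the NCHW8c/OIHW8i8o layouts are fixed while dilation and the static input shape vary.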
class TestAnyConv2dNCHWc:
data_shape = tvm.testing.parameter((relay.Any(), 8, 224, 224, 8))
kernel_shape = tvm.testing.parameter((8, 8, 3, 3, 8, 8))
strides = tvm.testing.parameter((1, 1))
padding = tvm.testing.parameter((1, 1))
data_layout = tvm.testing.parameter("NCHW8c")
kernel_layout = tvm.testing.parameter("OIHW8i8o")
out_layout = tvm.testing.parameter("NCHW8c")
dilation, static_data_shape, ref_out_shape = tvm.testing.parameters(
((1, 1), (1, 8, 224, 224, 8), (1, 8, 224, 224, 8)),
((2, 2), (2, 8, 224, 224, 8), (2, 8, 222, 222, 8)),
)
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_conv2d_NCHWc(
self,
target,
dev,
data_shape,
kernel_shape,
strides,
padding,
dilation,
data_layout,
kernel_layout,
out_layout,
static_data_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", |
shape=kernel_shape, dtype=dtype)
y = relay.nn.contrib_conv2d_nchwc(
data,
kernel,
strides,
padding,
dilation,
kernel_size=kernel_shape[2:4],
channels=kernel_shape[0] * kernel_shape[-1],
data_layout=data_layout,
kernel_layout=kernel_layout,
out_layout=out_layout,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
check_result(
[data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]
)
def verify_any_conv1d_transpose_ncw(
data_shape,
kernel_shape,
strides,
padding,
dilation,
groups,
static_data_shape,
ref_out_shape,
output_padding,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv1d_transpose(
data,
kernel,
strides,
padding,
dilation,
groups,
kernel_size=kernel_shape[2:],
output_padding=output_padding,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_conv1d_transpose_ncw():
verify_any_conv1d_transpose_ncw(
(relay.Any(), 64, 224),
(64, 192, 3),
(1,),
(1,),
(1,),
1,
(2, 64, 224),
(2, 192, 224),
(0, 0),
)
verify_any_conv1d_transpose_ncw(
(relay.Any(), 32, 224),
(32, 64, 3),
(2,),
(1,),
(1,),
1,
(1, 32, 224),
(1, 64, 448),
(1, 1),
)
def verify_any_conv2d_transpose_nchw(
data_shape,
kernel_shape,
strides,
padding,
dilation,
groups,
static_data_shape,
ref_out_shape,
output_padding,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv2d_transpose(
data,
kernel,
strides,
padding,
dilation,
groups,
kernel_size=kernel_shape[2:4],
output_padding=output_padding,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_conv2d_transpose_nchw():
verify_any_conv2d_transpose_nchw(
(relay.Any(), 64, 224, 224),
(64, 192, 3, 3),
(1, 1),
(1, 1),
(1, 1),
1,
(2, 64, 224, 224),
(2, 192, 224, 224),
(0, 0),
)
verify_any_conv2d_transpose_nchw(
(relay.Any(), 32, 224, 224),
(32, 64, 3, 3),
(2, 2),
(1, 1),
(1, 1),
1,
(1, 32, 224, 224),
(1, 64, 448, 448),
(1, 1),
)
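# Pooling helpers: pool_type selects max_pool2d or avg_pool2d, and only the output
# shape is checked since the spatial dimensions are dynamic.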
def verify_any_pool2d(
pool_type,
data_shape,
pool_size,
strides,
dilation,
padding,
layout,
static_data_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
pool_func = relay.nn.max_pool2d if pool_type == "max" else relay.nn.avg_pool2d
data = relay.var("data", shape=data_shape, dtype=dtype)
y = pool_func(data, pool_size, strides, dilation, padding, layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_pool2d():
verify_any_pool2d(
"max",
(relay.Any(), 3, relay.Any(), relay.Any()),
(3, 3),
(1, 1),
(1, 1),
(1, 1),
"NCHW",
(2, 3, 220, 220),
(2, 3, 220, 220),
)
verify_any_pool2d(
"avg",
(relay.Any(), relay.Any(), relay.Any(), 4),
(1, 1),
(2, 2),
(1, 1),
(0, 0),
"NHWC",
(3, 220, 220, 4),
(3, 110, 110, 4),
)
verify_any_pool2d(
"max",
(relay.Any(), 3, relay.Any(), relay.Any(), 4),
(3, 3),
(2, 2),
(1, 1),
(1, 1),
"NCHW4c",
(2, 3, 220, 220, 4),
(2, 3, 110, 110, 4),
)
def verify_any_global_pool2d(pool_type, data_shape, layout, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
pool_func = relay.nn.global_max_pool2d if pool_type == "max" else relay.nn.global_avg_pool2d
data = relay.var("data", shape=data_shape, dtype=dtype)
y = pool_func(data, layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_global_pool2d():
verify_any_global_pool2d(
"max", (relay.Any(), 3, relay.Any(), relay.Any()), "NCHW", (2, 3, 220, 220), (2, 3, 1, 1)
)
verify_any_global_pool2d(
"avg", (relay.Any(), relay.Any(), relay.Any(), 4), "NHWC", (3, 220, 220, 4), (3, 1, 1, 4)
)
verify_any_global_pool2d(
"max",
(relay.Any(), 3, relay.Any(), relay.Any(), 4),
"NCHW4c",
(2, 3, 220, 220, 4),
(2, 3, 1, 1, 4),
)
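# split returns a tuple, so this helper evaluates the module directly with the VM
# executor and compares each output field's shape against the expected list of shapes.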
def verify_any_split(data_shape, indices_or_sections, axis, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.split(data, indices_or_sections, axis)
mod["main"] = relay.Function([data], y.astuple())
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
for kind in ["vm"]:
result = relay.create_executor(kind, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
data_np
)
for ret, ref_ret in zip(result, ref_out_shape):
assert ret.numpy().shape == ref_ret, "Shape mismatch: expect %s but got %s." % (
str(ref_ret),
str(ret.numpy().shape),
)
@tvm.testing.uses_gpu
def test_any_split():
verify_any_split((relay.Any(), 4), 2, -1, (9, 4), [(9, 2), (9, 2)])
verify_any_split((relay.Any(), 4), 2, 1, (9, 4), [(9, 2), (9, 2)])
verify_any_split((relay.Any(), relay.Any()), 2, 1, (9, 4), [(9, 2), (9, 2)])
verify_any_split((relay.Any(), 12), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
verify_any_split((relay.Any(), relay.Any()), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
verify_any_split((relay.Any(), 12), (8,), 1, (7, 12), [(7, 8), (7, 4)])
verify_any_split((relay.Any(), relay.Any()), (8,), 1, (7, 12), [(7, 8), (7, 4)])
@tvm.testing.uses_gpu
def test_any_batch_flatten():
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=any_dims(3), dtype=dtype)
y = relay.nn.batch_flatten(data)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
ref_out_shape = (3, 30)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
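# Dense with dynamic shapes; units may be None (inferred from the weight) or a fixed
# integer while the weight's second dimension stays dynamic.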
@tvm.testing.known_failing_targets("cuda", "vulkan")
class TestAnyDense:
(
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
) = tvm.testing.parameters(
(any_dims(2), any_dims(2), None, (4, 16), (8, 16), (4, 8)),
(any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50)),
)
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_dense(
self,
target,
dev,
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
weight = relay.var("weight", shape=weight_shape, dtype=dtype)
y = relay.nn.dense(data, weight, units)
mod["main"] = relay.Function([data, weight], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
weight_np = np.random.uniform(size=static_weight_shape).astype(dtype)
check_result(
[data_np, weight_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]
)
@tvm.testing.parametrize_targets("cuda -libs=cublas")
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_dense_cublas(
self,
target,
dev,
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
):
self.test_any_dense(
target,
dev,
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
)
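# batch_matmul with every combination of static/dynamic operand shapes: any_x / any_y
# select whether no dims, only the batch dim, or all dims of the corresponding operand
# are relay.Any(), with optional transposes on either side.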
class TestAnyBatchMatmul:
dtype = tvm.testing.parameter("float32")
executor_kind = tvm.testing.parameter("vm", "debug")
(x_shape, y_shape) = tvm.testing.parameters(
((1, 16, 32), (1, 32, 16)),
((5, 16, 32), (5, 32, 16)),
((5, 16, 32), (5, 32, 20)),
((30, 16, 32), (30, 32, 20)),
)
any_x, any_y = tvm.testing.parameters(
("none", "batch"), ("none", "all"), ("batch", "none"), ("batch", "batch"), ("batch", "all")
)
transpose_x = tvm.testing.parameter(True, False)
transpose_y = tvm.testing.parameter(True, False)
@tvm.testing.fixture
def x_var_shape(self, x_shape, any_x):
if any_x == "none":
return x_shape
elif any_x == "batch":
return tuple(relay.Any() if i == 0 else size for i, size in enumerate(x_shape))
elif any_x == "all":
return tuple(relay.Any() for _ in x_shape)
@tvm.testing.fixture
def y_var_shape(self, y_shape, any_y):
if any_y == "none":
return y_shape
elif any_y == "batch":
return tuple(relay.Any() if i == 0 else size for i, size in enumerate(y_shape))
elif any_y == "all":
return tuple(relay.Any() for _ in y_shape)
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_batch_matmul(
self,
target,
dev,
x_shape,
y_shape,
any_x,
any_y,
x_var_shape,
y_var_shape,
transpose_x,
transpose_y,
executor_kind,
dtype,
):
if transpose_x:
x_shape = (x_shape[0], x_shape[2], x_shape[1])
x_var_shape = (x_var_shape[0], x_var_shape[2], x_var_shape[1])
if transpose_y:
y_shape = (y_shape[0], y_shape[2], y_shape[1])
y_var_shape = (y_var_shape[0], y_var_shape[2], y_var_shape[1])
x = relay.var("x", relay.TensorType(x_var_shape, dtype))
y = relay.var("y", relay.TensorType(y_var_shape, dtype))
z = relay.nn.batch_matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
func = relay.Function([x, y], z)
x_np = np.random.uniform(size=x_shape).astype(dtype)
y_np = np.random.uniform(size=y_shape).astype(dtype)
z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=transpose_x, trans_y=transpose_y)
mod = tvm.ir.IRModule.from_expr(func)
z = relay.create_executor(executor_kind, mod=mod, device=dev, target=target).evaluate()(
x_np, y_np
)
tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5)
@tvm.testing.uses_gpu
def verify_any_pad(data_shape, pad_width, static_data_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.pad(data, pad_width)
mod["main"] = relay.F |
unction([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out = np.pad(data_np, pad_width)
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_pad():
verify_any_pad(any_dims(3), ((0, 0), (1, 1), (2, 2)), (1, 2, 3))
verify_any_pad(any_dims(4), ((1, 0), (1, 3), (0, 2), (9, 0)), (13, 11, 3, 1))
def verify_any_dilate(data_shape, strides, static_data_shape, dilation_value=None):
assert len(data_shape) == len(strides)
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
if dilation_value is None:
y = relay.nn.dilate(data, strides)
else:
y = relay.nn.dilate(data, strides, dilation_value)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_shape = tuple(
(static_data_shape[i] - 1) * strides[i] + 1 for i in range(len(static_data_shape))
)
if dilation_value is None:
dilation_value = 0.0
ref_out = np.ones(shape=ref_shape, dtype=dtype)
ref_out = dilation_value * ref_out
ref_out[tuple(slice(None, None, strides[i]) for i in range(len(data_shape)))] = data_np
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_dilate():
verify_any_dilate(any_dims(1), (1,), (1,))
verify_any_dilate(any_dims(1), (1,), (5,))
verify_any_dilate(any_dims(1), (5,), (5,))
verify_any_dilate(any_dims(3), (1, 1, 1), (1, 2, 3))
verify_any_dilate(any_dims(3), (1, 1, 2), (1, 2, 3))
verify_any_dilate(any_dims(3), (1, 1, 5), (1, 2, 3))
verify_any_dilate(any_dims(3), (3, 7, 5), (1, 2, 3))
verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4))
verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4), 1.0)
def verify_any_softmax(data_shape, axis, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.softmax(data, axis)
mod["main" |
] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_softmax():
verify_any_softmax(any_dims(3), -1, (1, 2, 3), (1, 2, 3))
verify_any_softmax(any_dims(4), 2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_relu(data_shape, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.relu(data)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_relu():
verify_any_relu(any_dims(3), (1, 2, 3), (1, 2, 3))
verify_any_relu(any_dims(4), (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_prelu(data_shape, alpha, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
alpha = relay.const(np.array([alpha]), dtype=dtype)
y = relay.nn.prelu(data, alpha)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_prelu():
verify_any_prelu(any_dims(3), 1, (1, 2, 3), (1, 2, 3))
verify_any_prelu(any_dims(4), 2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_leaky_relu(data_shape, alpha, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.leaky_relu(data, alpha)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_leaky_relu():
verify_any_leaky_relu(any_dims(3), 0.1, (1, 2, 3), (1, 2, 3))
verify_any_leaky_relu(any_dims(4), 0.2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_bias_add(data_shape, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
bias = relay.const(np.random.randn(1), dtype=dtype)
y = relay.nn.bias_add(data, bias)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_bias_add():
verify_any_bias_add(any_dims(3), (1, 2, 3), (1, 2, 3))
verify_any_bias_add(any_dims(4), (13, 11, 3, 1), (13, 11, 3, 1))
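# topk over a dynamic input; k is passed either as a runtime scalar argument or as a
# constant (const_k=True), and the returned indices are checked against np.argsort.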
def verify_any_topk(data_shape, kval, np_dshape, dtype, ret_type="indices", const_k=False):
mod = tvm.IRModule()
data = relay.var("data", shape=data_shape, dtype=dtype)
np_data = np.random.uniform(size=np_dshape).astype(dtype)
if const_k:
k = relay.const(kval)
args = [data]
in_vals = [np_data]
else:
k = relay.var("k", shape=(), dtype="int32")
args = [data, k]
in_vals = [np_data, kval]
out = relay.topk(data, k, ret_type=ret_type)
if ret_type == "both":
out = out[0]
mod["main"] = relay.Function(args, out)
sorted = np.argsort(-np_data)
if len(np_dshape) == 2:
ref_out = sorted[:, 0:kval]
else:
ref_out = sorted[0:kval]
check_result(in_vals, mod, ref_out)
@tvm.testing.uses_gpu
def test_any_topk():
verify_any_topk(any_dims(1), 5, (10,), "float32")
verify_any_topk(any_dims(2), 2, (6, 3), "int32")
verify_any_topk(any_dims(2), 3, (6, 3), "float32", const_k=True)
verify_any_topk(any_dims(1), 0, (0,), "float32", ret_type="both")
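# get_valid_counts with a dynamic number of anchors: the NumPy loop below builds the
# reference valid count, compacted boxes, and index arrays, writing -1 into slots past
# the number of valid boxes.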
def verify_any_get_valid_counts(num_anchor_real, dtype, targets=None):
mod = tvm.IRModule()
batch_size = 1
num_anchor = relay.Any()
data = relay.var("data", shape=(batch_size, num_anchor, 5), dtype=dtype)
np_data = np.random.uniform(size=(batch_size, num_anchor_real, 5)).astype(dtype)
np_out1 = np.zeros(shape=(batch_size,))
np_out2 = np.zeros(shape=np_data.shape).astype(dtype)
np_out3 = np.zeros(shape=(batch_size, num_anchor_real))
score_threshold = 0.95
for i in range(batch_size):
np_out1[i] = 0
inter_idx = 0
for j in range(num_anchor_real):
score = np_data[i, j, 0]
if score > score_threshold:
for k in range(5):
np_out2[i, inter_idx, k] = np_data[i, j, k]
np_out1[i] += 1
np_out3[i, inter_idx] = j
inter_idx += 1
if j >= np_out1[i]:
for k in range(5):
np_out2[i, j, k] = -1.0
np_out3[i, j] = -1
z = relay.vision.get_valid_counts(data, score_threshold, 0, score_index=0)
mod["main"] = relay.Function([data], z.astuple())
check_result([np_data], mod, [np_out1, np_out2, np_out3], targets=targets)
@tvm.testing.uses_gpu
def test_any_get_valid_counts():
verify_any_get_valid_counts(10, "float32")
targets = []
for tgt, dev in tvm.testing.enabled_targets():
if "opencl" not in tgt:
targets.append((tgt, dev))
verify_any_get_valid_counts(0, "float32", targets=targets)
@tvm.testing.uses_gpu
def test_fused_ops():
x = relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32")
y0 = x + relay.const(1.0, "float32")
y1 = y0 * relay.const(2.0, "float32")
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y1)
data = np.random.uniform(size=(5, 4)).astype("float32")
check_result([data], mod, (data + 1) * 2)
@tvm.testing.uses_gpu
def test_arange_with_dynamic_shape():
m, n, k = relay.Any(), relay.Any(), relay.Any()
x = relay.var("x", shape=(m, n, k), dtype="float32")
y0 = relay.shape_of(x)
y1 = relay.take(y0, relay.const(0, "int32"))
y2 = relay.op.arange(y1, dtype="int32")
y3 = y2 + relay.const(1, dtype="int32")
data = np.random.rand(10, 5, 3).astype("float32")
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y3)
check_result([data], mod, np.array(range(10)).astype("int32") + 1)
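# strided_slice over dynamic input shapes; begin/end/strides are either runtime tensors
# or constants (const_attrs=True), and results are checked against
# topi.testing.strided_slice_python.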
def verify_any_random_strided_slice(
begin_shape,
end_shape,
strides_shape,
data_shape,
slice_mode="end",
const_attrs=False,
):
np_begin = np.random.randint(2, size=begin_shape, dtype="int32")
np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
np_strides = np.random.randint(
1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32"
)
verify_any_strided_slice(
np_begin, np_end, np_strides, data_shape, slice_mode=slice_mode, const_attrs=const_attrs
)
def verify_any_strided_slice(
np_begin,
np_end,
np_strides,
data_shape,
axes=None,
slice_mode="end",
const_attrs=False,
):
np_data = np.random.uniform(size=data_shape).astype("float32")
ref_res = tvm.topi.testing.strided_slice_python(
np_data, np_begin, np_end, np_strides, slice_mode, axes
)
mod = tvm.IRModule()
data = relay.var("data", shape=any_dims(len(data_shape)), dtype="float32")
if const_attrs:
begin = relay.const(np_begin)
end = relay.const(np_end)
strides = relay.const(np_strides)
args = [data]
np_inputs = [np_data]
else:
begin = relay.var("begin", shape=np_begin.shape, dtype="int32")
end = relay.var("end", shape=np_end.shape, dtype="int32")
strides = relay.var("strides", shape=np_strides.shape, dtype="int32")
args = [data, begin, end, strides]
np_inputs = [np_data, np_begin, np_end, np_strides]
y = relay.strided_slice(
data, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
)
mod["main"] = relay.Function(args, y)
check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_strided_slice():
verify_any_random_strided_slice((2,), (2,), (2,), (15, 21))
verify_any_random_strided_slice((3,), (3,), (3,), (15, 17, 21))
verify_any_random_strided_slice((3,), (3,), (3,), (23, 29, 41))
verify_any_random_strided_slice((4,), (4,), (4,), (40, 50, 60, 70))
verify_any_random_strided_slice((3,), (3,), (3,), (15, 17, 21), slice_mode="size")
verify_any_random_strided_slice((2,), (2,), (2,), (15, 21), const_attrs=True)
begin = np.array([0, 1000000]).astype("int32")
end = np.array([1000000, -1000000]).astype("int32")
strides = np.array([1, -1]).astype("int32")
verify_any_strided_slice(begin, end, strides, (15, 21), const_attrs=False)
verify_any_strided_slice(begin, end, strides, (15, 21), const_attrs=True)
verify_any_strided_slice(begin, end, strides, (15, 17, 21), axes=[0, 2], const_attrs=True)
@tvm.testing.uses_gpu
def test_recursive_concat():
"""
fn @concat_loop(%i: int32, %st: (any, 1)) -> (any, 1) {
if (%i < 10) {
let %i_vec = reshape(%i, newshape=(1, 1))
let %new_st = concatenate((%st, %i_vec), axis=0)
concat_loop(%i + 1, %new_st)
} else {
st
}
}
"""
i = relay.var("i", shape=(), dtype="int32")
st = relay.var("st", shape=(relay.Any(), 1), dtype="int32")
def _cond(i, st):
return relay.op.min(relay.op.less(i, int32(10)))
def _body(i, st):
i_vec = relay.op.reshape(i, (1, 1))
ret = relay.op.concatenate([st, i_vec], axis=0)
return i + int32(1), ret
loop = while_loop(_cond, [i, st], _body)
start = relay.var("start", shape=(), dtype="int32")
body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
func = relay.Function([start], relay.TupleGetItem(body, 1))
mod = tvm.IRModule()
mod["main"] = func
data = np.array(0.0, dtype="int32")
ref = np.array([0] + list(range(10))).reshape((11, 1)).astype("int32")
check_result([data], mod, ref)
@tvm.testing.uses_gpu
def test_recursive_concat_with_wrong_annotation():
"""
v0.0.1
fn (%start: int32) {
%7 = {
let %while_loop = fn (%i: int32, %st: Tensor[(1, 1), int32]) {
%0 = less(%i, 10)
%1 = min(%0)
if (%1) {
%2 = add(%i, 1)
%3 = reshape(%i, newshape=[1, 1])
%4 = (%st, %3)
/* The annotation says the result of concat should be (1, 1), but it is actually (2, 1). */
%5 = concatenate(%4)
%while_loop(%2, %5)
} else {
(%i, %st)
}
}
%6 = reshape(0, newshape=[1, 1])
%while_loop(%start, %6)
}
%7.1
}
"""
i = relay.var("i", shape=(), dtype="int32")
st = relay.var("st", shape=(1, 1), dtype="int32")
def _cond(i, st):
return relay.op.min(relay.op.less(i, int32(10)))
def _body(i, st):
i_vec = relay.op.reshape(i, (1, 1))
ret = relay.op.concatenate([st, i_vec], axis=0)
return i + int32(1), ret
loop = while_loop(_cond, [i, st], _body)
start = relay.var("start", shape=(), dtype="int32")
body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
func = relay.Function([start], relay.TupleGetItem(body, 1))
with DiagnosticTesting() as diagnostics:
diagnostics.assert_message(
"The Relay type checker is unable to show the following types match:\n"
" Tensor[(2, 1), int32]\n"
" Tensor[(1, 1), int32]\n"
"In particular:\n"
" dimension 0 conflicts: 2 does not match 1."
)
func = infer_type(func)
@tvm.testing.uses_gpu
def test_tuple_get_item():
mod = tvm.IRModule()
dtype = "float32"
static_data_shape = (9, 4)
data_shape = (relay.Any(), 4)
indices_or_sections = 2
axis = 1
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.split(data, indices_or_sections, axis)
y = relay.expr.TupleGetItem(y.astuple(), 0)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out_shape = (9, 2)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_mixed_input_type():
mod = tvm.IRModule()
dtype = "float32"
static_data_shape = (9, 4)
data_shape = (relay.Any(), 4)
tensor_type = relay.TensorType(data_shape, dtype)
tuple_type = relay.TupleType([tensor_type, tensor_type])
data0 = relay.var("d0", type_annotation=relay.TupleType([tuple_type, tensor_type]))
data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
data_tuple = relay.expr.TupleWrapper(data0, 2)
nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
y = nested_data_tuple[1] * data_tuple[1] + data1
mod["main"] = relay.Function([data0, data1], y)
data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out_shape = (9, 4)
check_result(
[[[data_np0, data_np0], data_np0], data_np1],
mod,
ref_out_shape,
assert_shape=True,
only_vm=True,
)
def verify_any_crop_and_resize(
data_shape,
boxes_shape,
box_indices_shape,
crop_size,
layout,
static_boxes,
static_box_indices_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
indices_dtype = "int32"
data = relay.var("data", shape=data_shape, dtype=dtype)
boxes = relay.var("boxes", shape=boxes_shape, dtype=dtype)
box_indices = relay.var("box_indices", shape=box_indices_shape, dtype=indices_dtype)
y = relay.image.crop_and_resize(data, boxes, box_indices, crop_size, layout)
mod["main"] = relay.Function([data, boxes, box_indices], y)
data_np = np.random.uniform(size=data_shape).astype(dtype)
boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
check_result([data_np, boxes_np, box_indices_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_crop_and_resize():
verify_any_crop_and_resize(
data_shape=(1, 234, 234, 256),
boxes_shape=(relay.Any(), 4),
box_indices_shape=(relay.Any(),),
crop_size=(14, 14),
layout="NHWC",
static_boxes=(128, 4),
static_box_indices_shape=(128,),
ref_out_shape=(128, 14, 14, 256),
)
verify_any_crop_and_resize(
data_shape=(1, 256, 234, 234),
boxes_shape=(relay.Any(), 4),
box_indices_shape=(relay.Any(),),
crop_size=(14, 14),
layout="NCHW",
static_boxes=(128, 4),
static_box_indices_shape=(128,),
ref_out_shape=(128, 256, 14, 14),
)
def verify_any_mirror_pad(data_shape, pad_width, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.mirror_pad(data, pad_width)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_mirror_pad():
verify_any_mirror_pad(
data_shape=(1, 256, 232, 232),
pad_width=((0, 0), (0, 0), (1, 1), (1, 1)),
static_data_shape=(1, 256, 232, 232),
ref_out_shape=(1, 256, 234, 234),
)
def verify_any_ndarray_size(data_np_shape):
v = relay.var("v", shape=any_dims(len(data_np_shape)), dtype="float32")
n = relay.ndarray_size(v, dtype="int32")
mod = tvm.IRModule()
mod["main"] = relay.Function([v], n)
np_data = np.zeros(data_np_shape, dtype="float32")
ref_res = np.size(np_data)
check_result([np_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_ndarray_size():
verify_any_ndarray_size((2,))
verify_any_ndarray_size((2, 2))
verify_any_ndarray_size((1, 2, 3, 4))
def verify_any_resize2d(data_shape, scale, layout, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype) |
if layout == "NHWC":
size = (data_shape[1] * scale, data_shape[2] * scale)
else:
size = (data_shape[2] * scale, data_shape[3] * scale)
y = relay.image.resize2d(data, size, None, layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_resize():
verify_any_resize2d(
data_shape=(relay.Any(), 4, 4, 4),
scale=2,
layout="NHWC",
static_data_shape=(1, 4, 4, 4),
ref_out_shape=(1, 8, 8, 4),
)
verify_any_resize2d(
data_shape=(relay.Any(), 8, 17, 20),
scale=3,
layout="NCHW",
static_data_shape=(2, 8, 17, 20),
ref_out_shape=(2, 8, 51, 60),
)
def verify_any_grid_sample(data_shape, grid_shape, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
grid = relay.var("grid", shape=grid_shape, dtype=dtype)
y = relay.image.grid_sample(data, grid)
mod["main"] = relay.Function([data, grid], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
grid_np = np.random.uniform(size=grid_shape).astype(dtype)
check_result([data_np, grid_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_grid_sample():
verify_any_grid_sample(
data_shape=(relay.Any(), 4, 16, 32),
grid_shape=(4, 2, 8, 8),
static_data_shape=(4, 4, 16, 32),
ref_out_shape=(4, 4, 8, 8),
)
verify_any_grid_sample(
data_shape=(relay.Any(), 4, 16, 32),
grid_shape=(4, 2, 32, 32),
static_data_shape=(4, 4, 16, 32),
ref_out_shape=(4, 4, 32, 32),
)
def verify_any_affine_grid(num_batch, static_num_batch, target_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data_shape = (num_batch, 2, 3)
static_data_shape = (static_num_batch, 2, 3)
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.image.affine_grid(data, target_shape)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_affine_grid():
verify_any_affine_grid(
num_batch=relay.Any(),
static_num_batch=1,
target_shape=(16, 32),
ref_out_shape=(1, 2, 16, 32),
)
verify_any_affine_grid(
num_batch=relay.Any(),
static_num_batch=8,
target_shape=(32, 32),
ref_out_shape=(8, 2, 32, 32),
)
def test_any_consecutive_broadcast():
dtype = "float32"
data0 = relay.var("data0", shape=any_dims(2), dtype=dtype)
data1 = relay.var("data1", shape=any_dims(2), dtype=dtype)
data2 = relay.var("data2", shape=any_dims(2), dtype=dtype)
data3 = relay.var("data3", shape=any_dims(2), dtype=dtype)
out0 = data0 + data1
out1 = data0 * data1
out2 = out0 - out1
out3 = data2 + data3
out4 = data2 * data3
out5 = out3 - out4
out6 = out2 * out5
mod = tvm.IRModule()
mod["main"] = relay.Function([data0, data1, data2, data3], out6)
np_data0 = np.random.uniform(size=(1, 4)).astype(dtype)
np_data1 = np.random.uniform(size=(2, 4)).astype(dtype)
np_data2 = np.random.uniform(size=(1, 4)).astype(dtype)
np_data3 = np.random.uniform(size=(2, 4)).astype(dtype)
ref_res = ((np_data0 + np_data1) - (np_data0 * np_data1)) * (
(np_data2 + np_data3) - (np_data2 * np_data3)
)
check_result([np_data0, np_data1, np_data2, np_data3], mod, ref_res)
def test_reshape_concat():
dtype = "float32"
d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
d1 = relay.var("d1", shape=any_dims(3), dtype=dtype)
out = relay.op.concatenate([relay.op.reshape(d0, [-1]), relay.op.reshape(d1, [-1])], axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function([d0, d1], out)
np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
np_data1 = np.random.uniform(size=(2, 5, 2)).astype(dtype)
ref_res = np.concatenate([np.reshape(np_data0, [-1]), np.reshape(np_data1, [-1])], axis=0)
check_result([np_data0, np_data1], mod, ref_res)
d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
d1 = relay.var("d1", shape=any_dims(2), dtype=dtype)
s0 = relay.var("s0", shape=any_dims(3), dtype=dtype)
s1 = relay.var("s1", shape=any_dims(3), dtype=dtype)
out = relay.op.concatenate(
[relay.op.reshape_like(d0, s0), relay.op.reshape_like(d1, s1)], axis=0
)
mod = tvm.IRModule()
mod["main"] = relay.Function([d0, d1, s0, s1], out)
np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
np_data1 = np.random.uniform(size=(8, 5)).astype(dtype)
np_shape_like0 = np.random.uniform(size=(2, 2, 5)).astype(dtype)
np_shape_like1 = np.random.uniform(size=(4, 2, 5)).astype(dtype)
ref_res = np.concatenate(
[np.reshape(np_data0, np_shape_like0.shape), np.reshape(np_data1, np_shape_like1.shape)],
axis=0,
)
check_result([np_data0, np_data1, np_shape_like0, np_shape_like1], mod, ref_res)
def test_any_adv_index():
data = relay.var("data", shape=(5, relay.Any(), relay.Any()), dtype="float32")
index0 = relay.var("index0", shape=(1, relay.Any()), dtype="int64")
index1 = relay.var("index1", shape=(relay.Any(), 1), dtype="int64")
out = relay.adv_index([data, index0, index1])
mod = tvm.IRModule()
mod["main"] = relay.Function([data, index0, index1], out)
np_data_shape = (5, 5, 10)
np_index0_shape = (1, 4)
np_index1_shape = (4, 1)
np_data = np.random.uniform(size=np_data_shape).astype("float32")
np_index0 = np.random.uniform(0, np_data_shape[0], size=np_index0_shape).astype("int64")
np_index1 = np.random.uniform(0, np_data_shape[0], size=np_index1_shape).astype("int64")
ref_res = np_data[tuple([np_index0, np_index1])]
print(ref_res.shape)
check_result([np_data, np_index0, np_index1], mod, ref_res)
def verify_any_repeat(data_shape, np_dshape, repeats, axis):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.repeat(data, repeats, axis)
mod["main"] = relay.Function([data], y)
np_data = np.random.uniform(size=np_dshape).astype(dtype)
ref_res = np.repeat(np_data, repeats, axis)
check_result([np_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_repeat():
verify_any_repeat(any_dims(2), (1, 2), 2, 0)
verify_any_repeat(any_dims(1), (3,), 3, -1)
verify_any_repeat(any_dims(4), (2, 1, 1, 4), 4, 2)
def verify_any_stack(data_shape, np_dshape, num_data, axis):
mod = tvm.IRModule()
dtype = "float32"
inputs = []
for i in range(num_data):
inputs.append(relay.var("data{}".format(i), shape=data_shape, dtype=dtype))
y = relay.stack(inputs, axis)
mod["main"] = relay.Function(inputs, y)
np_inputs = []
for _ in range(num_data):
np_inputs.append(np.random.uniform(size=np_dshape).astype(dtype))
ref_res = np.stack(np_inputs, axis)
check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_stack():
verify_any_stack(any_dims(2), (1, 2), 3, 0)
verify_any_stack(any_dims(1), (3,), 4, -1)
verify_any_stack(any_dims(4), (2, 1, 1, 4), 2, 2)
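# where with broadcasting between dynamically shaped cond/x/y; when y_np_shape_invalid
# is given, a shape that cannot broadcast is expected to fail at run time with an
# "Invalid broadcast shapes" error.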
def verify_any_where(
cond_shape, x_shape, y_shape, cond_np_shape, x_np_shape, y_np_shape, y_np_shape_invalid=None
):
dtype = "float32"
cond = relay.var("cond", shape=cond_shape, dtype="bool")
x = relay.var("x", shape=x_shape, dtype=dtype)
y = relay.var("y", shape=y_shape, dtype=dtype)
z = relay.where(cond, x, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([cond, x, y], z)
cond_np = np.random.randn(*cond_np_shape) > 0
x_np = np.random.randn(*x_np_shape).astype(dtype)
y_np = np.random.randn(*y_np_shape).astype(dtype)
expected = np.where(cond_np, x_np, y_np)
check_result([cond_np, x_np, y_np], mod, expected)
if y_np_shape_invalid:
y_np_bad = np.random.randn(*y_np_shape_invalid).astype(dtype)
try:
check_result([cond_np, x_np, y_np_bad], mod, expected)
except tvm.error.TVMError as e:
error_msg = str(e).split("\n")[-1]
assert "Invalid broadcast shapes" in error_msg
@tvm.testing.uses_gpu
def test_any_where():
verify_any_where(any_dims(1), (5,), (5,), (5,), (5,), (5,))
verify_any_where(any_dims(1), any_dims(1), (5,), (5,), (5,), (5,))
verify_any_where(any_dims(1), any_dims(1), any_dims(1), (5,), (5,), (5,))
verify_any_where((5,), any_dims(1), any_dims(1), (5,), (5,), (5,))
verify_any_where(any_dims(1), any_dims(1), any_dims(1), (5,), (1,), (5,))
verify_any_where(any_dims(1), any_dims(2), any_dims(2), (5,), (5, 5), (5, 5))
verify_any_where(any_dims(1), any_dims(1), any_dims(2), (5,), (5,), (5, 5))
verify_any_where(
any_dims(2), any_dims(2), any_dims(2), (3, 4), (3, 1), (1, 4), y_np_shape_invalid=(2, 4)
)
x = relay.var("x", shape=any_dims(1), dtype="int64")
y = relay.var("y", shape=any_dims(2), dtype="float32")
left = relay.take(x, relay.const(1, dtype="int32")) + relay.const(4, "int64")
right = relay.const(4, "int64")
where = relay.where(relay.const(False, "bool"), left, right)
z = relay.take(y, where, axis=1)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.randn(2).astype("int64")
y_np = np.random.randn(2, 6).astype("float32")
expected = y_np[:, 4]
check_result([x_np, y_np], mod, expected)
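# non_max_suppression with a dynamic number of boxes, including the empty-input case
# where zero boxes must produce zero indices and a zero valid-box count.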
@tvm.testing.uses_gpu
def test_non_max_suppression():
x0 = relay.var("x0", relay.ty.TensorType((1, relay.Any(), 6), "float32"))
x1 = relay.var("x1", relay.ty.TensorType((1,), "int32"))
x2 = relay.var("x2", relay.ty.TensorType((1, relay.Any()), "int32"))
x3 = relay.var("x3", relay.ty.TensorType((), "int32"))
z = relay.vision.non_max_suppression(
x0,
x1,
x2,
x3,
iou_threshold=0.5,
force_suppress=True,
top_k=2,
return_indices=True,
invalid_to_bottom=False,
)
z = z.astuple()
func = relay.Function([x0, x1, x2, x3], z)
mod = tvm.IRModule()
mod["main"] = func
np_data = np.array(
[
[
[0, 0.8, 1, 20, 25, 45],
[1, 0.7, 30, 60, 50, 80],
[0, 0.4, 4, 21, 19, 40],
[2, 0.9, 35, 61, 52, 79],
[1, 0.5, 100, 60, 70, 110],
]
]
).astype("float32")
np_valid_count = np.array([4]).astype("int32")
np_indices = np.array([[0, 1, 3, 4, -1]]).astype("int32")
np_max_output_size = -1
np_indices_result = np.array([[4, 0, -1, -1, -1]])
np_valid_box_count = np.array([[2]]).astype("int32")
check_result(
[np_data, np_valid_count, np_indices, np_max_output_size],
mod,
[np_indices_result, np_valid_box_count],
only_vm=False,
)
np_data = np.zeros((1, 0, 6)).astype("float32")
np_valid_count = np.array([0]).astype("int32")
np_indices = np.zeros((1, 0)).astype("int32")
np_max_output_size = -1
np_indices_result = np.zeros((1, 0))
np_valid_box_count = np.array([[0]]).astype("int32")
check_result(
[np_data, np_valid_count, np_indices, np_max_output_size],
mod,
[np_indices_result, np_valid_box_count],
only_vm=False,
)
@tvm.testing.uses_gpu
def test_all_class_non_max_suppression():
def verify_all_class_non_max_suppression(
boxes_np,
scores_np,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected,
output_format="onnx",
):
batch_size = boxes_np.shape[0]
num_classes = scores_np.shape[1]
num_boxes = relay.Any()
boxes = relay.var("boxes", relay.ty.TensorType((batch_size, num_boxes, 4), "float32"))
scores = relay.var(
"scores", relay.ty.TensorType((batch_size, num_classes, num_boxes), "float32")
)
nms_out = relay.vision.all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_format
)
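# In "onnx" mode the op returns padded (batch, class, box) index triplets plus the
# number of valid rows, so only the first nms_out[1] rows are sliced out and compared
# against the expected selected indices.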
if output_format == "onnx":
three = relay.const(np.array([3]), dtype="int64")
begin = relay.const(np.array([0, 0]), dtype="int64")
end = relay.op.concatenate([nms_out[1], three], axis=0)
strides = relay.const(np.array([1, 1]), dtype="int64")
out = relay.op.strided_slice(nms_out[0], begin, end, strides)
mod = tvm.IRModule()
mod["main"] = relay.Function([boxes, scores], out)
check_result([boxes_np, scores_np], mod, [expected])
else:
out = nms_out.tuple_value
mod = tvm.IRModule()
mod["main"] = relay.Function([boxes, scores], out)
check_result([boxes_np, scores_np], mod, expected)
boxes = np.array(
[
[
[0.0, 0.0, 0.3, 0.3],
[0.5, 0.5, 0.4, 0.4],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[0.5, 0.5, 1.0, 1.0],
],
]
).astype("float32")
scores = np.array(
[
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.8, 0.2, 0.6, 0.3, 0.9]],
]
).astype("float32")
max_output_boxes_per_class = 2
iou_threshold = 0.8
score_threshold = 0.4
expected = np.array([[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 0]])
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
expected = [
np.array(
[[[0, 4], [0, 2], [1, 4], [1, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]]
),
np.array(
[
[
0.9,
0.6,
0.9,
0.8,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,