ypeVar("b")
c = relay.TypeVar("c")
ft1 = relay.FuncType([b], c, [a])
assert_vars_match(all_type_vars(ft1), [a, b, c])
ft2 = relay.FuncType([], relay.TupleType([a, b, c]), [])
assert_vars_match(all_type_vars(ft2), [a, b, c])
w = relay.Var("w")
x = relay.Var("x", a)
y = relay.Var("y", b)
z = relay.Var("z", c)
f1 = relay.Function([x], y, b, [a])
assert_vars_match(all_type_vars(f1), [a, b])
f2 = relay.Function([x], relay.Let(y, x, z))
assert_vars_match(all_type_vars(f2), [a, b, c])
f3 = relay.Function([], relay.Tuple([x, y, z]), ret_type=relay.TupleType([a, b, c]))
assert_vars_match(all_type_vars(f3), [a, b, c])
f4 = relay.Function([w], relay.Tuple([]), type_params=[a, b, c])
assert_vars_match(all_type_vars(f4), [a, b, c])
f5 = relay.Function([w], w)
assert len(all_type_vars(f5)) == 0
import pytest
import os
import time
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform, build_module
from tvm.relay.testing import run_opt_pass
from tvm.contrib import graph_executor, pipeline_executor, pipeline_executor_build
from tvm._ffi import get_global_func
from tvm.contrib import cc as _cc
def graph_split(expr, split_conf, params=None):
"""Splitting the graph into a list of subgraphs"""
def get_dep_var(sub_var_dep):
return [var for var in sub_var_dep[len(sub_var_dep) - 1]["ref_nodes"]]
def parse_dependency(value, snode_dep, new_input_idx):
new_args = []
need_update = False
for var in value.args:
is_free_var = False
for dep in snode_dep[:-1]:
if var in dep["nodes"]:
dep["nodes"][var] += 1
dep["ref_nodes"][var] = dep["nodes"][var]
is_free_var = True
if is_free_var:
need_update = True
new_args.append(relay.var(f"data_n_{new_input_idx}", var.checked_type))
new_input_idx += 1
else:
new_args.append(var)
if need_update:
value = tvm.relay.expr.Call(
value.op, new_args, value.attrs, value.type_args, value.span
)
return value, snode_dep, new_input_idx
def merge_constant_expr(constant_expr, expr):
if not isinstance(constant_expr.body, tvm.relay.expr.Let):
return tvm.relay.expr.Let(constant_expr.var, constant_expr.value, expr)
return tvm.relay.expr.Let(
constant_expr.var, constant_expr.value, merge_constant_expr(constant_expr.body, expr)
)
def _recursion(anf, pipeline_mods, split_conf, constant_expr):
nonlocal operator_index_map
nonlocal new_input_idx
nonlocal snode_dep
cur_node_dep = snode_dep[len(snode_dep) - 1]
if isinstance(anf, tvm.relay.Function):
return tvm.relay.Function(
anf.params,
_recursion(anf.body, pipeline_mods, split_conf, constant_expr),
anf.ret_type,
anf.type_params,
anf.attrs,
)
if isinstance(anf, tvm.relay.expr.Let):
value = anf.value
if isinstance(value, tvm.relay.expr.Constant):
if not constant_expr:
constant_expr = tvm.relay.expr.Let(anf.var, value, anf.var)
else:
constant_expr = tvm.relay.expr.Let(anf.var, value, constant_expr)
if isinstance(value, tvm.relay.expr.Call):
new_args = []
cur_node_dep["nodes"][anf.var] = 0
value, snode_dep, new_input_idx = parse_dependency(value, snode_dep, new_input_idx)
if isinstance(value.op, tvm.ir.Op):
if value.op.name in operator_index_map:
operator_index_map[value.op.name] += 1
else:
operator_index_map[value.op.name] = 0
split_operator_name = split_conf[0]["op_name"] if split_conf else ""
split_operator_index = split_conf[0]["op_index"] if split_conf else ""
if (
split_conf
and split_operator_name in operator_index_map
and operator_index_map[split_operator_name] >= split_operator_index
):
split_conf.pop(0)
snode_dep.append({"nodes": {}, "ref_nodes": {}})
ann = _recursion(
anf.body,
pipeline_mods,
split_conf,
constant_expr,
)
snode_dep.pop()
dep_vars = get_dep_var(snode_dep)
body = relay.Tuple(dep_vars) if len(dep_vars) > 1 else anf.var
if constant_expr:
ann = merge_constant_expr(constant_expr, ann)
ann = run_opt_pass(ann, transform.ToGraphNormalForm())
mod = tvm.IRModule.from_expr(ann)
pipeline_mods.insert(0, mod)
return tvm.relay.expr.Let(anf.var, value, body)
return tvm.relay.expr.Let(
anf.var,
value,
_recursion(anf.body, pipeline_mods, split_conf, constant_expr),
)
else:
return anf
snode_dep = [{"nodes": {}, "ref_nodes": {}}]
pipeline_mods = []
operator_index_map = {}
new_input_idx = 0
constant_expr = None
subgraph_split_conf = split_conf.copy()
if params:
expr = build_module.bind_params_by_name(expr, params)
anf = run_opt_pass(expr, transform.ToANormalForm())
anf = run_opt_pass(anf, transform.InferType())
ann = _recursion(
anf,
pipeline_mods,
subgraph_split_conf,
constant_expr,
)
ann = run_opt_pass(ann.body, transform.ToGraphNormalForm())
mod = tvm.IRModule.from_expr(ann)
pipeline_mods.insert(0, mod)
return pipeline_mods
def get_network():
mods = []
dshape = (3, 3)
data = relay.var("data_0", relay.TensorType(dshape, "float32"))
data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
data_net1_output_1 = relay.var("data_0", relay.TensorType(dshape, "float32"))
data_net1_output_2 = relay.var("data_1", relay.TensorType(dshape, "float32"))
data_net2_output_1 = relay.var("data_0", relay.TensorType(dshape, "float32"))
mvalue1 = np.full((1), 1).astype("float32")
mvalue2 = np.full((1), 2).astype("float32")
mvalue3 = np.full((1), 3).astype("float32")
mv1 = relay.Constant(tvm.nd.array(mvalue1))
mv2 = relay.Constant(tvm.nd.array(mvalue2))
mv3 = relay.Constant(tvm.nd.array(mvalue3))
net1_output1 = relay.add(data, mv1)
net1_output2 = relay.subtract(data, mv2)
net1_output3 = relay.concatenate((net1_output1, net1_output2), axis=0)
(net1_output3, _) = relay.split(net1_output3, indices_or_sections=2, axis=0)
net1_output3 = relay.add(net1_output3, mv2)
net2 = relay.add(net1_output3, mv2)
net2 = relay.add(net2, data21)
net2_output = relay.add(net2, mv3)
net3 = relay.multiply(net2_output, mv3)
net3 = relay.add(net3, net1_output2)
return tvm.IRModule.from_expr(relay.Function([data, data21], relay.Tuple([net3]))), dshape
def get_split_mod():
mod, dshape = get_network()
split_conf = [{"op_name": "add", "op_index": 1}, {"op_name": "add", "op_index": 4}]
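# The two split points above should cut the graph into three sub-modules,
# which the tests below unpack as (mod1, mod2, mod3).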
mods = graph_split(mod["main"], split_conf)
return mods, dshape
def get_manual_mod():
mods = []
dshape = (3, 3)
data = relay.var("data_0", relay.TensorType(dshape, "float32"))
data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
data_net1_output_1 = relay.var("data_0", relay.TensorType(dshape, "float32"))
data_net1_output_2 = relay.var("data_1", relay.TensorType(dshape, "float32"))
data_net2_output_1 = relay.var("data_0", relay.TensorType(dshape, "float32"))
mvalue1 = np.full((1), 1).astype("float32")
mvalue2 = np.full((1), 2).astype("float32")
mvalue3 = np.full((1), 3).astype("float32")
mv1 = relay.Constant(tvm.nd.array(mvalue1))
mv2 = relay.Constant(tvm.nd.array(mvalue2))
mv3 = relay.Constant(tvm.nd.array(mvalue3))
net1_output1 = relay.add(data, mv1)
net1_output2 = relay.subtract(data, mv2)
net1_output3 = relay.multiply(data, mv3)
net2 = relay.add(data_net1_output_1, mv2)
net2 = relay.add(net2, data21)
net2_output = relay.add(net2, mv3)
net3 = relay.multiply(data_net2_output_1, mv3)
net3 = relay.add(net3, data_net1_output_2)
mods.append(
tvm.IRModule.from_expr(
relay.Function([data], relay.Tuple([net1_output1, net1_output2, net1_output3]))
)
)
mods.append(tvm.IRModule.from_expr(relay.Function([data_net1_output_1, data21], net2_output)))
mods.append(
tvm.IRModule.from_expr(relay.Function([data_net1_output_2, data_net2_output_1], net3))
)
return mods, dshape
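# Hand-written pipeline configuration for the three sub-modules; test_pipeline
# checks PipelineConfig.get_config() against this expected layout.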
def get_manual_conf(mods, target):
mod_config = {}
pipe_config1 = {
"mod_idx": 0,
"cpu_affinity": "0",
"output": [
{"output_idx": 0, "dependencies": [{"mod_idx": 1, "input_name": "data_n_0"}]},
{"output_idx": 1, "dependencies": [{"mod_idx": 2, "input_name": "data_n_2"}]},
],
}
mod_config[mods[0]] = {
"pipeline": pipe_config1,
"target_host": None,
"mod_name": "default",
"build": None,
"params": None,
"target": target[0],
"fcompile": _cc.create_shared,
"dev": target[1],
}
pipe_config2 = {
"mod_idx": 1,
"cpu_affinity": "0",
"output": [
{"output_idx": 0, "dependencies": [{"mod_idx": 2, "input_name": "data_n_1"}]},
],
}
mod_config[mods[1]] = {
"pipeline": pipe_config2,
"target_host": None,
"mod_name": "default",
"build": None,
"params": None,
"target": "llvm",
"fcompile": None,
"dev": tvm.cpu(0),
}
pipe_config3 = {
"mod_idx": 2,
"cpu_affinity": "0",
"output": [{"output_idx": 0, "dependencies": [{"global_output_index": 0}]}],
}
mod_config[mods[2]] = {
"pipeline": pipe_config3,
"target_host": None,
"mod_name": "default",
"build": None,
"params": None,
"target": "llvm",
"fcompile": None,
"dev": tvm.cpu(0),
}
return mod_config
def recreate_parameters(mod):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, "llvm")
mod_customized_params = {}
for key, value in lib.params.items():
new_value = value.numpy() + np.full(value.shape, 10).astype(value.dtype)
mod_customized_params[key] = tvm.nd.array(new_value)
return mod_customized_params, mod
def run_modules(
mod_configs,
dev,
target,
global_input_name,
global_input_data,
mod_set_input,
input_name,
input_data,
params_mod=None,
params=None,
):
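# Reference path: build and run each sub-module separately with the graph
# executor, forwarding outputs to dependent modules as described by the pipeline
# config, so the result can be compared against the pipeline executor output.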
mod_input = {}
final_output = {}
idx = 0
for mod in mod_configs:
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target)
m = graph_executor.GraphModule(lib["default"](dev))
if idx in mod_input:
for input in mod_input[idx]:
input = mod_input[idx][input]
m.set_input(input["index"], input["data"])
else:
m.set_input(global_input_name, global_input_data)
if mod == mod_set_input:
m.set_input(input_name, input_data)
if params_mod == mod:
m.set_input(None, None, **params)
m.run()
n = m.get_num_outputs()
mconfig = mod_configs[mod]
for output in mconfig["pipeline"]["output"]:
output_data = m.get_output(output["output_idx"]).numpy()
for dep in output["dependencies"]:
is_global = False
if "global_output_index" in dep:
is_global = True
name = dep["global_output_index"]
else:
mod_idx = dep["mod_idx"]
name = dep["input_name"]
if is_global:
final_output[name] = output_data
else:
if mod_idx in mod_input:
mod_input[mod_idx][name] = {"index": name, "data": output_data}
else:
mod_input[mod_idx] = {name: {"index": name, "data": output_data}}
idx = idx + 1
return final_output
def reset_cpu_affinity(affinity):
config_threadpool = get_global_func("runtime.config_threadpool")
config_threadpool(-2, 0)
os.sched_setaffinity(0, affinity)
def test_pipe_runtime_error_check():
if pipeline_executor_build.pipeline_executor_build_enabled():
(mod1, mod2, mod3), dshape = get_split_mod()
pipe_error = pipeline_executor_build.PipelineConfig()
with pytest.raises(RuntimeError):
pipe_error[mod1]["output"][9]
with pytest.raises(RuntimeError):
pipe_error[mod1]["input"]["data_9"]
with pytest.raises(RuntimeError):
pipe_error[mod1]["output"][0].connect(pipe_error[mod2]["input"]["data_0"])
pipe_error[mod2]["output"][0].connect(pipe_error[mod1]["input"]["data_0"])
with pytest.raises(RuntimeError):
pipe_error[mod1]["output"][0].connect(pipe_error[mod1]["input"]["data_0"])
with pytest.raises(RuntimeError):
pipe_error[mod1]["input"]["data_0"].connect(pipe_error[mod1]["input"]["data_0"])
with pytest.raises(RuntimeError):
pipe_error[mod1]["input"]["data_0"].connect(pipe_error[mod2]["input"]["data_0"])
with pytest.raises(RuntimeError):
pipe_error[mod1]["output"][0].connect(pipe_error["input"]["data_0"])
with pytest.raises(RuntimeError):
pipe_error["input"]["data_0"].connect(pipe_error[mod1]["output"][0])
with pytest.raises(RuntimeError):
pipe_error["output"]["0"].connect(pipe_error[mod1]["output"][0])
pipe_config = pipeline_executor_build.PipelineConfig()
pipe_config[mod1].target = "llvm"
pipe_config[mod1].dev = tvm.cpu(0)
pipe_config["param_group"]["param_0"].connect(pipe_config[mod1]["param"])
pipe_config[mod1]["output"][0].connect(pipe_config["output"]["0"])
with tvm.transform.PassContext(opt_level=3):
pipeline_mod_factory = pipeline_executor_build.build(pipe_config)
pipeline_module = pipeline_executor.PipelineModule(pipeline_mod_factory)
customized_parameters, _ = recreate_parameters(mod1)
with pytest.raises(RuntimeError):
pipeline_module.set_params("param_0", None)
with pytest.raises(RuntimeError):
pipeline_module.set_params("param_1", customized_parameters)
def test_pipeline():
if pipeline_executor_build.pipeline_executor_build_enabled():
target_list = tvm.testing.enabled_targets()
for target in target_list:
affinity = os.sched_getaffinity(0)
(mod1, mod2, mod3), dshape = get_split_mod()
datas = []
for i in range(5):
datas.append(np.full(dshape, 3 + i).astype("float32"))
pipe_config = pipeline_executor_build.PipelineConfig()
customized_parameters, customized_parameters_mod = recreate_parameters(mod1)
assert customized_parameters_mod == mod1
pipe_config["param_group"]["param_0"].connect(pipe_config[mod1]["param"])
pipe_config["input"]["data_a"].connect(pipe_config[mod1]["input"]["data_0"])
pipe_config["input"]["data_b"].connect(pipe_config[mod2]["input"]["data_1"])
pipe_config[mod1]["output"][0].connect(pipe_config[mod2]["input"]["data_n_0"])
pipe_config[mod1]["output"][1].connect(pipe_config[mod3]["input"]["data_n_2"])
pipe_config[mod2]["output"][0].connect(pipe_config[mod3]["input"]["data_n_1"])
pipe_config[mod3]["output"][0].connect(pipe_config["output"]["0"])
pipe_config[mod1].target = target[0]
pipe_config[mod1].dev = target[1]
pipe_config[mod1].cpu_affinity = "0"
pipe_config[mod1].fcompile = _cc.create_shared
pipe_config[mod2].target = "llvm"
pipe_config[mod2].dev = tvm.cpu(0)
pipe_config[mod2].cpu_affinity = "0"
pipe_config[mod3].target = "llvm"
pipe_config[mod3].dev = tvm.cpu(0)
pipe_config[mod3].cpu_affinity = "0"
mconfig = pipe_config.get_config()
assert mconfig["module_connection"] == get_manual_conf([mod1, mod2, mod3], target)
with tvm.transform.PassContext(opt_level=3):
pipeline_mod_factory = pipeline_executor_build.build(pipe_config)
directory_path = tvm.contrib.utils.tempdir().temp_dir
if not os.path.exists(directory_path):
os.makedirs(directory_path)
config_file_name = pipeline_mod_factory.export_library(directory_path)
pipeline_module = pipeline_executor.PipelineModule(pipeline_mod_factory)
assert pipeline_module
pipeline_module_test = pipeline_executor.PipelineModule.load_library(config_file_name)
assert pipeline_module_test.num_outputs == 1
input_map = pipeline_module_test.get_input_pipeline_map("data_b")
assert input_map[0] == "1" and input_map[1] == "data_1"
input_map = pipeline_module_test.get_input_pipeline_map("data_a")
assert input_map[0] == "0" and input_map[1] == "data_0"
module_index = pipeline_module_test.get_params_group_pipeline_map("param_0")
assert module_index == 0
pipeline_module_test.set_params("param_0", customized_parameters)
normal_outputs = []
for round in range(0, len(datas)):
data = datas[round]
wrong_output = run_modules(
mconfig["module_connection"],
tvm.cpu(),
"llvm", |
"data_0",
data,
mod2,
"data_1",
data,
)
normal_output = run_modules(
mconfig["module_connection"],
tvm.cpu(),
"llvm",
"data_0",
data,
mod2,
"data_1",
data,
customized_parameters_mod,
customized_parameters,
)
normal_outputs.append(normal_output)
pipeline_module_test.set_input("data_a", tvm.nd.array(data))
pipeline_module_test.set_input("data_b", tvm.nd.array(data))
input_map = pipeline_module_test.get_input_pipeline_map("data_a")
if input_map[0] == "0":
input_data = pipeline_module_test.get_input("data_a")
tvm.testing.assert_allclose(data, input_data.numpy())
assert pipeline_module_test.num_inputs == 2
pipeline_module_test.run()
for k in range(0, len(datas)):
statistic_time = 0
outputs = pipeline_module_test.get_output()
while len(outputs) == 0:
outputs = pipeline_module_test.get_output()
statistic_time = statistic_time + 1
assert statistic_time < 5
time.sleep(1)
for i in range(len(outputs)):
tvm.testing.assert_allclose(normal_outputs[k][i], outputs[i].numpy())
assert not (normal_output[i] == wrong_output[i]).all()
assert pipeline_module_test.num_executing_pipeline == round + 1
reset_cpu_affinity(affinity)
if __name__ == "__main__":
pytest.main([__file__])
import pytest
import tvm
import tvm.relay
import tvm.testing
from tvm.relay.testing import run_infer_type
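# Threefry is a counter-based PRNG: threefry_generate returns (next_key, random
# tensor), so generating twice from the same seed key is expected to be
# deterministic, while split or advanced keys should produce different streams.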
@tvm.testing.parametrize_targets
def test_threefry_repeatability(target, dev):
key1 = tvm.relay.random.threefry_key(1)
rand1 = tvm.relay.random.threefry_generate(key1, (12,))
out_key1, out1 = tvm.relay.create_executor(
"vm", tvm.IRModule.from_expr(tvm.relay.Function([], rand1)), target=target, device=dev
).evaluate()()
key2 = tvm.relay.random.threefry_key(1)
rand2 = tvm.relay.random.threefry_generate(key2, (12,))
out_key2, out2 = tvm.relay.create_executor(
"vm", tvm.IRModule.from_expr(tvm.relay.Function([], rand2)), target=target, device=dev
).evaluate()()
assert (
out1.numpy() == out2.numpy()
).all(), "Generate on same seed should have the same output random numbers"
assert (
out_key1.numpy() == out_key2.numpy()
).all(), "Generate on same seed should have the same next keys"
@tvm.testing.parametrize_targets
def test_threefry_split(target, dev):
key = tvm.relay.random.threefry_key(1)
left, right = tvm.relay.TupleWrapper(tvm.relay.random.threefry_split(key), 2)
_, rand1 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(left, (16,)), 2)
_, rand2 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(right, (16,)), 2)
out1, out2 = tvm.relay.create_executor(
"vm",
tvm.IRModule.from_expr(tvm.relay.Function([], tvm.relay.Tuple((rand1, rand2)))),
target=target,
device=dev,
).evaluate()()
assert (
out1.numpy() != out2.numpy()
).any(), "Generate after split should not have the same output"
@tvm.testing.parametrize_targets
def test_threefry_sequential_generate(target, dev):
key = tvm.relay.random.threefry_key(1)
key, rand1 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(key, (4,)), 2)
_, rand2 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(key, (4,)), 2)
out1, out2 = tvm.relay.create_executor(
"vm",
tvm.IRModule.from_expr(tvm.relay.Function([], tvm.relay.Tuple((rand1, rand2)))),
target=target,
device=dev,
).evaluate()()
assert (
out1.numpy() != out2.numpy()
).any(), "Sequential generates should not have the same output"
@tvm.testing.parametrize_targets
def test_threefry_sequential_generate_remaining(target, dev):
key = tvm.relay.random.threefry_key(1)
key, rand1 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(key, (7,)), 2)
_, rand2 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(key, (7,)), 2)
out1, out2 = tvm.relay.create_executor(
"vm",
tvm.IRModule.from_expr(tvm.relay.Function([], tvm.relay.Tuple((rand1, rand2)))),
target=target,
device=dev,
).evaluate()()
assert (
out1.numpy()[-3:] != out2.numpy()[-3:]
).any(), "Sequential generates should not have the same output"
def test_threefry_generate_infer():
oshape = (12,)
key_type = tvm.relay.TensorType([10], dtype="uint64")
gen_type = tvm.relay.TensorType(oshape, dtype="uint64")
expected_type = tvm.relay.TupleType([key_type, gen_type])
key = tvm.relay.random.threefry_key(1)
rand1 = tvm.relay.random.threefry_generate(key, oshape)
f = tvm.relay.Function([], rand1)
f = run_infer_type(f)
assert tvm.ir.structural_equal(f.ret_type, expected_type)
def test_threefry_split_infer():
key_type = tvm.relay.TensorType([10], dtype="uint64")
expected_type = tvm.relay.TupleType([key_type, key_type])
key = tvm.relay.random.threefry_key(1)
out_keys = tvm.relay.random.threefry_split(key)
f = tvm.relay.Function([], out_keys)
f = run_infer_type(f)
assert tvm.ir.structural_equal(f.ret_type, expected_type)
def test_uniform_infer():
oshape = (12,)
odtypes = ["float32", "float64"]
for odtype in odtypes:
key_type = tvm.relay.TensorType([10], dtype="uint64")
gen_type = tvm.relay.TensorType(oshape, dtype=odtype)
expected_type = tvm.relay.TupleType([key_type, gen_type])
key = tvm.relay.random.threefry_key(1)
rand1 = tvm.relay.random.uniform(key, oshape, odtype)
f = tvm.relay.Function([], rand1)
f = run_infer_type(f)
assert tvm.ir.structural_equal(f.ret_type, expected_type)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_threefry_generate_infer_fail():
fake_key = tvm.relay.const([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype="uint64")
rand1 = tvm.relay.random.threefry_generate(fake_key, (12,))
f = tvm.relay.Function([], rand1)
f = run_infer_type(f)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_threefry_split_infer_fail():
fake_key = tvm.relay.const([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype="uint64")
out_keys = tvm.relay.random.threefry_split(fake_key)
f = tvm.relay.Function([], out_keys)
f = run_infer_type(f)
@tvm.testing.requires_llvm
def test_threefry_generate_out_size():
key = tvm.relay.random.threefry_key(1)
key, rand1 = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(key, (5,)), 2)
out = tvm.relay.create_executor(
"vm",
tvm.IRModule.from_expr(tvm.relay.Function([], rand1)),
target=tvm.target.Target("llvm"),
device=tvm.device("cpu"),
).evaluate()()
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import to_python, run_as_python
from tvm.relay.prelude import Prelude
from tvm.runtime.container import ADT
from tvm.relay.backend.interpreter import RefValue, ConstructorValue
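# seq chains expressions with throw-away Let bindings so that side effects
# (e.g. RefWrite) are evaluated in order before the final expression.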
def seq(*exprs):
ret = exprs[0]
for expr in exprs[1:]:
ret = relay.Let(relay.var("_"), ret, expr)
return ret
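# "box" is a minimal single-constructor ADT (box a) used by the constructor and
# pattern-matching tests below.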
def init_box_adt(mod):
box = relay.GlobalTypeVar("box")
a = relay.TypeVar("a")
box_ctor = relay.Constructor("box", [a], box)
mod[box] = relay.TypeData(box, [a], [box_ctor])
return (box, box_ctor)
def assert_tensor_value(candidate, val):
assert isinstance(candidate, tvm.nd.NDArray)
assert np.array_equal(candidate.numpy(), np.array(val))
def assert_adt_len(candidate, fields):
assert isinstance(candidate, ADT)
assert len(candidate) == fields
def assert_constructor_value(candidate, constructor, fields):
assert isinstance(candidate, ConstructorValue)
assert candidate.tag == constructor.tag
assert len(candidate.fields) == fields
def test_create_empty_tuple():
empty = relay.Tuple([])
tup_val = run_as_python(empty)
assert_adt_len(tup_val, 0)
def test_create_scalar():
scalar = relay.const(1)
tensor_val = run_as_python(scalar)
print(type(tensor_val))
assert_tensor_value(tensor_val, 1)
def test_create_tensor():
tensor = relay.const([[1, 1], [2, 2]])
tensor_val = run_as_python(tensor)
assert_tensor_value(tensor_val, [[1, 1], [2, 2]])
def test_create_nested_tuple():
relay_tup = relay.Tuple(
[relay.const(1), relay.const(2), relay.Tuple([relay.const(3), relay.const(4)])]
)
tup_val = run_as_python(relay_tup)
assert_adt_len(tup_val, 3)
for i in range(2):
assert_tensor_value(tup_val[i], i + 1)
assert_adt_len(tup_val[2], 2)
for i in range(2):
assert_tensor_value(tup_val[2][i], i + 3)
def test_tuple_get_item():
relay_tup = relay.Tuple(
[relay.const(1), relay.const(2), relay.Tuple([relay.const(3), relay.const(4)])]
)
for i in range(2):
index = relay.TupleGetItem(relay_tup, i)
val = run_as_python(index)
assert_tensor_value(val, i + 1)
for i in range(2):
index = relay.TupleGetItem(relay.TupleGetItem(relay_tup, 2), i)
val = run_as_python(index)
assert_tensor_value(val, i + 3)
def test_create_let():
v = relay.Var("v")
let = relay.Let(v, relay.Tuple([]), relay.Tuple([v, v]))
tup_val = run_as_python(let)
assert_adt_len(tup_val, 2)
assert_adt_len(tup_val[0], 0)
assert_adt_len(tup_val[1], 0)
def test_create_ref():
relay_ref = relay.RefCreate(relay.Tuple([]))
ref_val = run_as_python(relay_ref)
assert isinstance(ref_val, RefValue)
assert_adt_len(ref_val.value, 0)
def test_ref_read():
v = relay.Var("v")
assign = relay.Let(v, relay.RefCreate(relay.Tuple([])), relay.RefRead(v))
read_val = run_as_python(assign)
assert_adt_len(read_val, 0)
def test_ref_write():
v = relay.Var("v")
initial_write = relay.Let(
v,
relay.RefCreate(relay.Tuple([relay.const(1)])),
relay.RefWrite(v, relay.Tuple([relay.const(2)])),
)
write_val = run_as_python(initial_write)
assert_adt_len(write_val, 0)
w = relay.Var("w")
read_after_write = relay.Let(
v,
relay.RefCreate(relay.Tuple([relay.const(1)])),
relay.Let(
w,
relay.RefCreate(relay.RefRead(v)),
seq(
relay.RefWrite(v, relay.Tuple([relay.const(2)])),
relay.Tuple([relay.RefRead(w), relay.RefRead(v)]),
),
),
)
read_val = run_as_python(read_after_write)
assert_adt_len(read_val, 2)
assert_adt_len(read_val[0], 1)
assert_adt_len(read_val[1], 1)
assert_tensor_value(read_val[0][0], 1)
assert_tensor_value(read_val[1][0], 2)
def test_if():
true_cond = relay.const(True)
false_cond = relay.const(False)
v = relay.Var("v")
true_branch = seq(relay.RefWrite(v, relay.const(1)), relay.RefRead(v))
false_branch = seq(relay.RefWrite(v, relay.const(2)), relay.RefRead(v))
true_expr = relay.Let(
v, relay.RefCreate(relay.const(0)), relay.If(true_cond, true_branch, false_branch)
)
false_expr = relay.Let(
v, relay.RefCreate(relay.const(0)), relay.If(false_cond, true_branch, false_branch)
)
true_val = run_as_python(true_expr)
assert_tensor_value(true_val, 1)
false_val = run_as_python(false_expr)
assert_tensor_value(false_val, 2)
def test_local_function():
v = relay.Var("v")
ident = relay.Function([v], v)
f = relay.Var("f")
call1 = relay.Let(f, ident, f(relay.Tuple([])))
call2 = relay.Let(f, ident, f(relay.const(2)))
call_val1 = run_as_python(call1)
assert_adt_len(call_val1, 0)
call_val2 = run_as_python(call2)
assert_tensor_value(call_val2, 2)
def test_global_function():
mod = tvm.IRModule()
ident = relay.GlobalVar("ident")
a = relay.TypeVar("a")
v = relay.Var("v", a)
mod[ident] = relay.Function([v], v, a, [a])
call1 = ident(relay.const(1))
call2 = ident(relay.Tuple([relay.const(2), relay.const(2)]))
call_val1 = run_as_python(call1, mod)
assert_tensor_value(call_val1, 1)
call_val2 = run_as_python(call2, mod)
assert_adt_len(call_val2, 2)
assert_tensor_value(call_val2[0], 2)
assert_tensor_value(call_val2[1], 2)
def test_constructor():
mod = tvm.IRModule()
box, box_ctor = init_box_adt(mod)
init_box_int = box_ctor(relay.const(1))
box_val_int = run_as_python(init_box_int, mod)
assert_constructor_value(box_val_int, box_ctor, 1)
assert_tensor_value(box_val_int.fields[0], 1)
init_box_tup = box_ctor(relay.Tuple([]))
box_val_tup = run_as_python(init_box_tup, mod)
assert_constructor_value(box_val_tup, box_ctor, 1)
assert_adt_len(box_val_tup.fields[0], 0)
def test_match_wildcard():
mod = tvm.IRModule()
box, box_ctor = init_box_adt(mod)
v = relay.Var("v")
match = relay.Let(
v,
box_ctor(relay.Tuple([])),
relay.Match(v, [relay.Clause(relay.PatternWildcard(), relay.const(1))]),
)
match_val = run_as_python(match, mod)
assert_tensor_value(match_val, 1)
def test_match_var():
mod = tvm.IRModule()
box, box_ctor = init_box_adt(mod)
v = relay.Var("v")
w = relay.Var("w")
match = relay.Let(
v, box_ctor(relay.const(1)), relay.Match(v, [relay.Clause(relay.PatternVar(w), w)])
)
match_val = run_as_python(match, mod)
assert_constructor_value(match_val, box_ctor, 1)
assert_tensor_value(match_val.fields[0], 1)
def test_match_pattern():
mod = tvm.IRModule()
box, box_ctor = init_box_adt(mod)
v = relay.Var("v")
w = relay.Var("w")
match = relay.Let(
v,
box_ctor(relay.const(1)),
relay.Match(
v, [relay.Clause(relay.PatternConstructor(box_ctor, [relay.PatternVar(w)]), w)]
),
)
match_val = run_as_python(match, mod)
assert_tensor_value(match_val, 1)
def test_nested_match_pattern():
mod = tvm.IRModule()
box, box_ctor = init_box_adt(mod)
v = relay.Var("v")
w = relay.Var("w")
match = relay.Let(
v,
box_ctor(box_ctor(relay.const(2))),
relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
box_ctor, [relay.PatternConstructor(box_ctor, [relay.PatternVar(w)])]
),
w,
)
],
),
)
match_val = run_as_python(match, mod)
assert_tensor_value(match_val, 2)
def test_match_order():
mod = tvm.IRModule()
box, box_ctor = init_box_adt(mod)
v = relay.Var("v")
w = relay.Var("w")
match = relay.Let(
v,
box_ctor(box_ctor(relay.const(2))),
relay.Match(
v,
[
relay.Clause(relay.PatternWildcard(), relay.const(1)),
relay.Clause(
relay.PatternConstructor(
box_ctor, [relay.PatternConstructor(box_ctor, [relay.PatternVar(w)])]
),
w,
),
],
),
)
match_val = run_as_python(match, mod)
assert_tensor_value(match_val, 1)
def test_local_recursion():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = p.mod.get_type("List")
v = relay.Var("v")
h = relay.Var("h")
t = relay.Var("t")
f = relay.Var("f")
let = relay.Let(
f,
relay.Function(
[v],
relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
cons(h, f(t)),
),
relay.Clause(relay.PatternConstructor(nil, []), nil()),
],
),
),
f(cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))),
)
val = run_as_python(let, mod)
assert_constructor_value(val, cons, 2)
assert_tensor_value(val.fields[0], 1)
assert_constructor_value(val.fields[1], cons, 2)
assert_tensor_value(val.fields[1].fields[0], 2)
assert_constructor_value(val.fields[1].fields[1], cons, 2)
assert_tensor_value(val.fields[1].fields[1].fields[0], 3)
assert_constructor_value(val.fields[1].fields[1].fields[1], nil, 0)
def test_global_recursion():
mod = tvm.IRModule()
p = Prelude(mod)
rlist, cons, nil = p.mod.get_type("List")
copy = relay.GlobalVar("copy")
a = relay.TypeVar("a")
v = relay.Var("v", rlist(a))
h = relay.Var("h")
t = relay.Var("t")
copy_def = relay.Function(
[v],
relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
cons(h, copy(t)),
),
relay.Clause(relay.PatternConstructor(nil, []), nil()),
],
),
rlist(a),
[a],
)
mod[copy] = copy_def
call1 = copy_def(cons(relay.const(1), cons(relay.const(2), nil())))
val1 = run_as_python(call1, mod)
assert_constructor_value(val1, cons, 2)
assert_tensor_value(val1.fields[0], 1)
assert_constructor_value(val1.fields[1], cons, 2)
assert_tensor_value(val1.fields[1].fields[0], 2)
assert_constructor_value(val1.fields[1].fields[1], nil, 0)
call2 = copy_def(cons(relay.Tuple([]), nil()))
val2 = run_as_python(call2, mod)
assert_constructor_value(val2, cons, 2)
assert_adt_len(val2.fields[0], 0)
assert_constructor_value(val2.fields[1], nil, 0)
def test_higher_order_call():
h = relay.Var("h")
f = relay.Var("f")
x = relay.Var("x")
ho_anon = relay.Let(
h, relay.Function([f], f(relay.Tuple([]))), h(relay.Function([x], relay.const(1)))
)
anon_val = run_as_python(ho_anon)
assert_tensor_value(anon_val, 1)
g = relay.Var("g")
ho_named = relay.Let(
h,
relay.Function([f], f(relay.Tuple([]))),
relay.Let(g, relay.Function([x], relay.const(2)), h(g)),
)
named_val = run_as_python(ho_named)
assert_tensor_value(named_val, 2)
def test_match_effect_exactly_once():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = p.mod.get_type("List")
r = relay.Var("r")
data = seq(relay.RefWrite(r, cons(relay.Tuple([]), relay.RefRead(r))), relay.RefRead(r))
match = relay.Let(
r,
relay.RefCreate(nil()),
relay.Match(
data,
[
relay.Clause(relay.PatternConstructor(nil, []), relay.const(0)),
relay.Clause(
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
),
relay.const(1),
),
relay.Clause(relay.PatternWildcard(), relay.const(2)),
],
),
)
match_val = run_as_python(match, mod)
assert_tensor_value(match_val, 1)
def test_arbitrary_let_nesting():
mod = tvm.IRModule()
p = Prelude(mod)
x = relay.Var("x")
r = relay.Var("r")
y = relay.Var("y")
z = relay.Var("z")
expr = relay.Tuple(
[
relay.Let(x, relay.Tuple([relay.const(1), relay.const(2)]), relay.TupleGetItem(x, 1)),
relay.Let(
r,
relay.RefCreate(relay.const(1)),
seq(relay.RefWrite(r, relay.const(3)), relay.RefRead(r)),
),
relay.Let(y, p.id(relay.Let(z, relay.const(4), z)), y),
]
)
tup_val = run_as_python(expr, mod)
assert_adt_len(tup_val, 3)
assert_tensor_value(tup_val[0], 2)
assert_tensor_value(tup_val[1], 3)
assert_tensor_value(tup_val[2], 4)
def test_ref_execution_order():
x = relay.Var("x")
y = relay.Var("y")
f = relay.Var("f")
r = relay.Var("r")
expr = relay.Let(
f,
relay.Function([x, y], x),
relay.Let(
r,
relay.RefCreate(relay.const(1)),
relay.Tuple(
[
relay.RefRead(r),
seq(relay.RefWrite(r, relay.const(2)), relay.RefRead(r)),
seq(relay.RefWrite(r, relay.const(3)), relay.RefRead(r)),
f(
seq(relay.RefWrite(r, relay.const(4)), relay.RefRead(r)),
seq(relay.RefWrite(r, relay.const(5)), relay.RefRead(r)),
),
relay.RefRead(r),
]
),
),
)
tup_val = run_as_python(expr)
assert_adt_len(tup_val, 5)
assert_tensor_value(tup_val[0], 1)
assert_tensor_value(tup_val[1], 2)
assert_tensor_value(tup_val[2], 3)
assert_tensor_value(tup_val[3], 4)
assert_tensor_value(tup_val[4], 5)
def test_op_add():
add = relay.add(relay.const(1), relay.const(2))
add_val = run_as_python(add)
assert_tensor_value(add_val, 3)
def test_op_stack():
def verify_stack(dshapes, axis):
x_data = [np.random.normal(size=shape).astype("int32") for shape in dshapes]
ref_res = np.stack(x_data, axis=axis)
args = []
for data in x_data:
args.append(relay.const(data))
call = relay.stack(relay.Tuple(args), axis)
call_val = run_as_python(call)
type(call_val)
assert_tensor_value(call_val, ref_res)
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
def test_split():
def verify_split(shape, indices_or_sections, axis=0):
x = np.random.normal(size=shape).astype("float32")
ref_res = np.split(x, indices_or_sections, axis=axis)
call = relay.split(relay.const(x), indices_or_sections, axis=axis)
call_val = run_as_python(call)
assert_adt_len(call_val, len(ref_res))
for i in range(len(ref_res)):
assert_tensor_value(call_val[i], ref_res[i])
verify_split((2, 3), 2)
verify_split((5, 3), [3])
verify_split((5, 9, 3), [3, 4], 1)
verify_split((5, 5, 2, 2), 5, 1)
verify_split((5, 5, 2, 2), 5, 0)
def test_batch_norm():
def verify_batch_norm(shapes):
data = [np.absolute(np.random.normal(size=shape).astype("float32")) for shape in shapes]
relay_args = [relay.const(arg) for arg in data]
eps = 1e-5
def reference(x, gamma, beta, moving_mean, moving_var):
return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta
ref_res = reference(*data)
call = relay.nn.batch_norm(*relay_args, epsilon=eps)[0]
call_val = run_as_python(call)
assert isinstance(call_val, tvm.nd.NDArray)
tvm.testing.assert_allclose(call_val.numpy(), ref_res, atol=eps, rtol=eps)
verify_batch_norm([(10, 20), (20,), (20,), (20,), (20,)])
verify_batch_norm([(20, 10), (10,), (10,), (10,), (10,)])
verify_batch_norm([(10, 50), (50,), (50,), (50,), (50,)])
verify_batch_norm([(30, 40), (40,), (40,), (40,), (40,)])
import tvm
from tvm import relay
from tvm.relay.transform import recast
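# Note: recast(expr, dtype, out_dtype, ops=None, skip_layers=None) is assumed to
# rewrite the selected ops (nn.conv2d when ops is not given) to compute in
# `dtype` with `out_dtype` accumulation, inserting casts around them, as the
# expected() graphs below illustrate.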
def test_recast_simple():
"""Recast a single convolution operator."""
def before():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
return relay.Function([x, w], c)
def expected():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
x_int = relay.cast(x, "int8")
w_int = relay.cast(w, "int8")
c = relay.nn.conv2d(x_int, w_int, padding=(1, 1), out_dtype="int32")
c_float = relay.cast(c, "float32")
return relay.Function([x, w], c_float)
pre = before()
post = recast(pre, "int8", "int32")
expected = expected()
assert tvm.ir.structural_equal(expected, post)
def test_recast_medium():
"""Recast a slightly larger graph."""
def before():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
w2 = relay.var("w2", shape=[8, 8, 3, 3])
c2 = relay.nn.conv2d(c, w2, padding=(1, 1), out_dtype="float32")
return relay.Function([x, w, w2], c2)
def expected():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
x_int = relay.cast(x, "int8")
w_int = relay.cast(w, "int8")
c = relay.nn.conv2d(x_int, w_int, padding=(1, 1), out_dtype="int32")
c_float = relay.cast(c, "float32")
w2 = relay.var("w2", shape=[8, 8, 3, 3])
w2_int = relay.cast(w2, "int8")
c_float_int = relay.cast(c_float, "int8")
c2 = relay.nn.conv2d(c_float_int, w2_int, padding=(1, 1), out_dtype="int32")
c2_float = relay.cast(c2, "float32")
return relay.Function([x, w, w2], c2_float)
pre = before()
post = recast(pre, "int8", "int32")
expected = expected()
assert tvm.ir.structural_equal(expected, post)
def test_recast_skip():
"""Recast a graph |
using skip layers."""
def before():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
w2 = relay.var("w2", shape=[8, 8, 3, 3])
c2 = relay.nn.conv2d(c, w2, padding=(1, 1), out_dtype="float32")
return relay.Function([x, w, w2], c2)
def expected():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
w2 = relay.var("w2", shape=[8, 8, 3, 3])
w2_int = relay.cast(w2, "int8")
c_int = relay.cast(c, "int8")
c2 = relay.nn.conv2d(c_int, w2_int, padding=(1, 1), out_dtype="int32")
c2_float = relay.cast(c2, "float32")
return relay.Function([x, w, w2], c2_float)
pre = before()
post = recast(pre, "int8", "int32", skip_layers=[0])
expected = expected()
assert tvm.ir.structural_equal(expected, post)
def test_recast_concat():
def before():
x = relay.var("x", shape=[1, 4])
y = relay.var("y", shape=[1, 4])
t = relay.Tuple([x, y])
c = relay.op.concatenate(t, axis=1)
return relay.Function([x, y], c)
def expected():
xv = relay.var("x", shape=[1, 4])
yv = relay.var("y", shape=[1, 4])
x = relay.cast(xv, "float16")
y = relay.cast(yv, "float16")
t = relay.Tuple([x, y])
c = relay.op.concatenate(t, axis=1)
c = relay.cast(c, "float32")
return relay.Function([xv, yv], c)
pre = before()
post = recast(pre, "float16", "float32", ops=["concatenate"])
expected = expected()
assert tvm.ir.structural_equal(expected, post)
def test_recast_relu():
"""Recast a ReLU operator which does not have attributes."""
def before():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
r = relay.nn.relu(c)
return relay.Function([x, w], r)
def expected():
x = relay.var("x", shape=[8, 8, 8, 8])
w = relay.var("w", shape=[8, 8, 3, 3])
x_fp16 = relay.cast(x, "float16")
w_fp16 = relay.cast(w, "float16")
c = relay.nn.conv2d(x_fp16, w_fp16, padding=(1, 1), out_dtype="float16")
c_float32 = relay.cast(c, "float32")
c_float16 = relay.cast(c_float32, "float16")
r = relay.nn.relu(c_float16)
r_float32 = relay.cast(r, "float32")
return relay.Function([x, w], r_float32)
pre = before()
post = recast(pre, "float16", "float16", ops=["nn.conv2d", "nn.relu"])
expected = expected()
assert tvm.ir.structural_equal(expected, post)
if __name__ == "__main__":
test_recast_simple()
test_recast_medium()
test_recast_skip()
test_recast_concat()
test_recast_relu()
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm import relay
from tvm import autotvm
from tvm import topi
from tvm.relay.backend import te_compiler
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
return topi.generic.schedule_conv2d_nchw(outs)
@autotvm.register_topi_compute("test/conv2d_2")
def _compute_conv2d_2(cfg, input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_2")
def _schedule_conv2d_2(cfg, outs):
return topi.generic.schedule_conv2d_nchw(outs)
def _compute_conv2d_3(input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
def _schedule_conv2d_3(outs):
return topi.generic.schedule_conv2d_nchw(outs)
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
strategy = relay.op.OpStrategy()
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
name="conv2d_1",
plevel=10,
)
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_2),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_2),
name="conv2d_2",
plevel=15,
)
ic = inputs[0].shape[1]
with tvm.te.SpecializedCondition(ic >= 16):
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_3),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_3),
name="conv2d_3",
plevel=20,
)
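# By default conv2d_2 (plevel 15) is preferred over conv2d_1 (plevel 10);
# conv2d_3 (plevel 20) is only added when the input-channel count is >= 16.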
return strategy
def _create_record(task_name, dshape, wshape, target, cost):
args = [te.placeholder(dshape), te.placeholder(wshape), (1, 1), (1, 1, 1, 1), (1, 1), "float32"]
task = autotvm.task.create(task_name, args, target)
cfg = autotvm.ConfigEntity(0, None, {}, [])
cfg.cost = cost
inp = autotvm.MeasureInput(target=target, task=task, config=cfg)
result = autotvm.MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
return (inp, result)
def test_get_valid_implementations():
target = tvm.target.Target("llvm")
def _get_impls(dshape, wshape):
data = relay.var("data", shape=dshape)
weight = relay.var("wshape", shape=wshape)
out = relay.nn.conv2d(data, weight, padding=(1, 1))
out = run_infer_type(out)
return relay.backend.te_compiler.get_valid_implementations(
relay.op.get("nn.conv2d"),
out.attrs,
[te.placeholder(dshape), te.placeholder(wshape)],
out.checked_type,
target,
)
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
impls = _get_impls((1, 8, 7, 7), (32, 8, 3, 3))
assert len(impls) == 2
impls = _get_impls((1, 16, 7, 7), (32, 16, 3, 3))
assert len(impls) == 3
def test_select_implementation():
target = tvm.target.Target("llvm")
def _select_impl(dshape, wshape, use_autotvm=False):
data = relay.var("data", shape=dshape)
weight = relay.var("wshape", shape=wshape)
out = relay.nn.conv2d(data, weight, padding=(1, 1))
out = run_infer_type(out)
return relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"),
out.attrs,
[te.placeholder(dshape), te.placeholder(wshape)],
out.checked_type,
target,
use_autotvm,
)
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3))
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3))
assert impl.name == "conv2d_3"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_3"
records = []
records.append(_create_record("test/conv2d_1", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.5))
records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.0))
with target:
with autotvm.apply_history_best(records):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_1"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_1"
records.append(_create_record("test/conv2d_2", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.2))
records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.2))
with target:
with autotvm.apply_history_best(records):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_1"
def test_te_compiler():
tec = relay.backend.te_compiler.get()
def get_func(shape):
x = relay.var("x", shape=shape)
y = relay.add(x, x)
z = relay.add(y, x)
f = relay.Function([x], z)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
return mod["main"]
z1 = tec.lower(get_func((10,)), "llvm")
z2 = tec.lower(get_func((10,)), "llvm")
z3 = tec.lower(get_func(()), "llvm")
assert z1.same_as(z2)
assert not z3.same_as(z1)
if tvm.testing.device_enabled("cuda"):
z4 = tec.lower(get_func(()), "cuda")
assert not z3.same_as(z4)
for target in ["llvm"]:
dev = tvm.device(target)
if tvm.testing.device_enabled(target):
f = tec.jit(get_func((10,)), target)
x = tvm.nd.array(np.ones(10).astype("float32"), device=dev)
y = tvm.nd.empty((10,), device=dev)
f(x, y)
tvm.testing.assert_allclose(y.numpy(), x.numpy() * 3)
def test_compile_placeholder_bypass():
te_compiler = relay.backend.te_compiler.get()
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
z = relay.var("z", shape=(2, 3))
result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
func = relay.Function(relay.analysis.free_vars(result), result)
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_injective_with_tuple():
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
x_transpose = relay.transpose(x)
output = relay.Tuple([x_transpose, y])
func = relay.Function([x, y], output)
relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_tuple_dup():
x = relay.var("data", shape=(16, 16))
log = relay.log(x)
output = relay.Tuple([log, log])
f = relay.Function([x], output)
relay.build(tvm.IRModule.from_expr(f), "llvm")
def test_compile_full():
shape = (
tvm.tir.IntImm("int32", 1),
tvm.tir.IntImm("int64", 16),
tvm.tir.IntImm("int64", 16),
tvm.tir.IntImm("int32", 64),
)
output = relay.full(relay.const(0, "int32"), shape=shape, dtype="int32")
f = relay.Function([], output)
mod = tvm.IRModule.from_expr(f)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
relay.build(mod, "llvm")
def test_compile_nhwc_pack():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
mod = relay.Function(relay.analysis.free_vars(func), func)
relay.build(mod, target="llvm")
def test_compile_propagate_hash():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
mod = tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(func), func))
vm = relay.vm.VMCompiler()
opt_mod, _ = vm.optimize(mod, target="llvm")
for f in opt_mod.functions.values():
assert "hash" in f.attrs.keys()
if __name__ == "__main__":
test_get_valid_implementations()
test_select_implementation()
test_te_compiler()
test_compile_placeholder_bypass()
test_compile_injective_with_tuple()
test_compile_tuple_dup()
test_compile_full()
test_compile_nhwc_pack()
import pytest
from tvm import TVMError
from tvm.relay.backend import Runtime
def test_create():
runtime = Runtime("cpp")
assert str(runtime) == "cpp"
def test_create_runtime_with_options():
runtime = Runtime("crt", {"system-lib": True})
assert str(runtime) == "crt"
assert runtime["system-lib"]
def test_attr_check():
runtime = Runtime("crt", {"system-lib": True})
assert "woof" not in runtime
assert "system-lib" in runtime
def test_create_runtime_not_found():
with pytest.raises(TVMError, match='Runtime "woof" is not defined'):
Runtime("woof", {})
def test_create_runtime_attr_not_found():
with pytest.raises(TVMError, match='Attribute "woof" is not available on this Runtime'):
Runtime("crt", {"woof": "bark"})
def test_create_runtime_attr_type_incorrect():
with pytest.raises(
TVMError,
match='Attribute "system-lib" should have type "IntImm"'
' but instead found "runtime.String"',
):
Runtime("crt", {"system-lib": "woof"})
def test_list_runtimes():
assert "crt" in Runtime.list_registered()
@pytest.mark.parametrize("runtime", [Runtime("crt"), "crt"])
def test_list_runtime_options(runtime):
aot_options = Runtime.list_registered_options(runtime)
assert "system-lib" in aot_options
assert aot_options["system-lib"] == "IntImm"
def test_list_runtime_options_not_found():
with pytest.raises(TVMError, match='Runtime "woof" is not defined'):
Runtime.list_registered_options("woof")
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.relay.data_dep_optimization import simplify_fc_transpose
def run_func(func, params, x):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, "llvm", params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.run()
tvm_output = m.get_output(0)
return tvm_output.numpy()
def test_simplify_fc_transpose():
data = relay.var("data", shape=(1, 32), dtype="float32")
x = relay.nn.relu(data)
w1 = relay.var("w1", shape=(32, 64), dtype="float32")
y = relay.nn.dense(x, relay.transpose(w1, axes=[1, 0]))
z = relay.nn.relu(y)
w2 = relay.var("w2", shape=(64, 16), dtype="float32")
zz = relay.nn.dense(z, relay.transpose(w2, axes=[1, 0]))
func = relay.Function(relay.analysis.free_vars(zz), zz)
params = {
"w1": tvm.nd.array(np.random.uniform(-1, 1, (32, 64)).astype("float32")),
"w2": tvm.nd.array(np.random.uniform(-1, 1, (64, 16)).astype("float32")),
}
x_np = np.random.randn(1, 32).astype("float32")
old_result = run_func(func, params, x_np)
new_func, new_params = simplify_fc_transpose.convert(func, params)
new_result = run_func(new_func, new_params, x_np)
np.testing.assert_allclose(old_result, new_result, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_simplify_fc_transpose()
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.topi.sparse.utils import random_bsr_matrix
from tvm.relay.build_module import bind_params_by_name
def run_func(func, params, x):
with tvm.transform.PassContext(opt_level=3):
graph, lib, new_params = relay.build(func, "llvm", params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
dtype = "float32"
m = graph_executor.create(graph, lib, dev)
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.set_input(**new_params)
m.run()
tvm_output = m.get_output(0)
return tvm_output.numpy()
def test_bsr_sparse_conv2d_nchw():
data = relay.var("data", shape=(1, 64, 32, 32), dtype="float32")
x = relay.nn.relu(data)
w = relay.var("weight", shape=(128, 64, 1, 1), dtype="float32")
y = relay.nn.conv2d(x, w, channels=128, kernel_size=1, data_layout="NCHW", kernel_layout="OIHW")
z = relay.nn.relu(y)
func = relay.Function(relay.analysis.free_vars(z), z)
params = {
"weight": tvm.nd.array(
np.array(random_bsr_matrix(128, 64, 8, 1, 0.1, "float32").todense()).reshape(
128, 64, 1, 1
)
)
}
x_np = np.random.randn(1, 64, 32, 32).astype("float32")
dense_output = run_func(func, params, x_np)
sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert(
func, params, (8, 1), 0.2, "NCHW"
)
sparse_output = run_func(sparse_func, params, x_np)
np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
def test_bsr_sparse_conv2d_nhwc():
data = relay.var("data", shape=(1, 32, 32, 64), dtype="float32")
x = relay.nn.relu(data)
w = relay.var("weight", shape=(1, 1, 64, 128), dtype="float32")
y = relay.nn.conv2d(x, w, channels=128, kernel_size=1, data_layout="NHWC", kernel_layout="HWIO")
z = relay.nn.relu(y)
func = relay.Function(relay.analysis.free_vars(z), z)
params = {
"weight": tvm.nd.array(
np.array(random_bsr_matrix(128, 64, 8, 1, 0.1, "float32").todense()).T.reshape(
1, 1, 64, 128
)
)
}
x_np = np.random.randn(1, 32, 32, 64).astype("float32")
dense_output = run_func(func, params, x_np)
sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert(
func, params, (8, 1), 0.2, "NHWC"
)
sparse_output = run_func(sparse_func, params, x_np)
np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
def test_bsr_sparse_conv2d_3x3_nchw():
data = relay.var("data", shape=(1, 64, 32, 32), dtype="float32")
x = relay.nn.relu(data)
w = relay.var("weight", shape=(128, 64, 3, 3), dtype="float32")
y = relay.nn.conv2d(
x, w, channels=128, kernel_size=3, padding=1, data_layout="NCHW", kernel_layout="OIHW"
)
z = relay.nn.relu(y)
func = relay.Function(relay.analysis.free_vars(z), z)
params = {
"weight": tvm.nd.array(
np.array(random_bsr_matrix(128, 64 * 9, 16, 1, 0.1, "float32").todense()).reshape(
128, 64, 3, 3
)
)
}
x_np = np.random.randn(1, 64, 32, 32).astype("float32")
dense_output = run_func(func, params, x_np)
func = bind_params_by_name(func, params)
sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert2(
func, {}, (16, 1), 0.2, "NCHW", 3
)
sparse_output = run_func(sparse_func, params, x_np)
np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
def test_bsr_sparse_conv2d_3x3_nhwc():
data = relay.var("data", shape=(1, 32, 32, 64), dtype="float32")
x = relay.nn.relu(data)
w = relay.var("weight", shape=(3, 3, 64, 128), dtype="float32")
y = relay.nn.conv2d(
x, w, channels=128, kernel_size=3, padding=1, data_layout="NHWC", kernel_layout="HWIO"
)
z = relay.nn.relu(y)
func = relay.Function(relay.analysis.free_vars(z), z)
params = {
"weight": tvm.nd.array(
np.array(random_bsr_matrix(128, 64 * 9, 16, 1, 0.1, "float32").todense()).T.reshape(
3, 3, 64, 128
)
)
}
x_np = np.random.randn(1, 32, 32, 64).astype("float32")
dense_output = run_func(func, params, x_np)
func = bind_params_by_name(func, params)
sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert2(
func, {}, (16, 1), 0.2, "NHWC", 3
)
sparse_output = run_func(sparse_func, params, x_np)
np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_bsr_sparse_conv2d_nhwc()
test_bsr_sparse_conv2d_nchw()
test_bsr_sparse_conv2d_3x3_nhwc()
test_bsr_sparse_conv2d_3x3_nchw()
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
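# Build a dense matrix whose nonzeros land on randomly chosen BS_R x BS_C blocks,
# then convert it to scipy BSR format so the block-structure asserts below hold.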
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"):
Y = np.zeros((M, N), dtype=dtype)
assert M % BS_R == 0
assert N % BS_C == 0
nnz = int(density * M * N)
num_blocks = int(nnz / (BS_R * BS_C)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
chosen_blocks = candidate_blocks[
np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
]
for i in range(len(chosen_blocks)):
r, c = chosen_blocks[i]
Y[r : r + BS_R, c : c + BS_C] = np.random.randn(BS_R, BS_C)
s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C))
assert s.data.shape == (num_blocks, BS_R, BS_C)
assert s.data.size >= nnz
assert s.indices.shape == (num_blocks,)
assert s.indptr.shape == (M // BS_R + 1,)
return s
def run_func(func, params, x):
with tvm.transform.PassContext(opt_level=3):
graph, lib, new_params = relay.build(func, "llvm", params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
dtype = "float32"
m = graph_executor.create(graph, lib, dev)
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.set_input(**new_params)
m.run()
tvm_output = m.get_output(0)
return tvm_output.numpy()
def test_bsr_sparse_dense():
data = relay.var("data", shape=(1, 128), dtype="float32")
x = relay.nn.relu(data)
w = relay.var("weight", shape=(768, 128), dtype="float32")
y = relay.nn.dense(x, w)
z = relay.nn.relu(y)
func = relay.Function(relay.analysis.free_vars(z), z)
params = {"weight": tvm.nd.array(random_bsr_matrix(768, 128, 32, 1, 0.1).todense())}
x_np = np.random.randn(1, 128).astype("float32")
dense_output = run_func(func, params, x_np)
sparse_func, params = relay.data_dep_optimization.bsr_dense.convert(func, params, (32, 1), 0.2)
sparse_output = run_func(sparse_func, params, x_np)
np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_bsr_sparse_dense()
"""Unit tests for target hooks."""
import sys
import numpy as np
import pytest
import logging
import tvm