import json
import numpy as np
import tvm
import tvm.testing
from tvm import te, rpc, relay, runtime
from tvm.contrib import utils, graph_executor
@tvm.testing.requires_llvm
def test_graph_simple():
n = 4
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
node0 = {"op": "null", "name": "x", "inputs": []}
node1 = {
"op": "tvm_op",
"name": "add",
"inputs": [[0, 0, 0]],
"attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"},
}
nodes = [node0, node1]
arg_nodes = [0]
node_row_ptr = [0, 1, 2]
outputs = [[1, 0, 0]]
shape = (4,)
attrs = {
"shape": ["list_shape", [shape, shape]],
"dltype": ["list_str", ["float32", "float32"]],
"storage_id": ["list_int", [0, 1]],
}
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": outputs,
"attrs": attrs,
}
graph = json.dumps(graph)
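# A note on the hand-crafted graph JSON above, as this test uses it: "nodes"
# lists the graph in topological order, "arg_nodes" holds the indices of the
# input nodes, "node_row_ptr" is the cumulative count of output entries per
# node, and each entry in "heads" (and in "inputs") is a
# [node_index, output_index, version] triple identifying one tensor.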
def check_verify():
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
mod = graph_executor.create(graph, mlib, tvm.cpu(0))
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.run(x=a)
out = mod.get_output(0, tvm.nd.empty((n,)))
np.testing.assert_equal(out.numpy(), a + 1)
def check_remote(server):
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
remote = rpc.connect(server.host, server.port)
temp = utils.tempdir()
dev = remote.cpu(0)
path_dso = temp.relpath("dev_lib.so")
mlib.export_library(path_dso)
remote.upload(path_dso)
mlib = remote.load_module("dev_lib.so")
mod = graph_executor.create(graph, mlib, remote.cpu(0))
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.run(x=tvm.nd.array(a, dev))
out = tvm.nd.empty((n,), device=dev)
out = mod.get_output(0, out)
np.testing.assert_equal(out.numpy(), a + 1)
def check_sharing():
x = relay.var("x", shape=(1, 10))
y = relay.var("y", sh |
ape=(1, 10))
z = relay.add(x, y)
func = relay.Function([x, y], z)
x_in = np.ones((1, 10)).astype("float32")
params = {"x": x_in}
graph, lib, params = relay.build(func, target="llvm", params=params)
mod_shared = graph_executor.create(graph, lib, tvm.cpu(0))
mod_shared.load_params(runtime.save_param_dict(params))
num_mods = 10
mods = [graph_executor.create(graph, lib, tvm.cpu(0)) for _ in range(num_mods)]
for mod in mods:
mod.share_params(mod_shared, runtime.save_param_dict(params))
a = np.random.uniform(size=(1, 10)).astype("float32")
for mod in mods:
mod.run(y=a)
out = mod.get_output(0, tvm.nd.empty((1, 10)))
np.testing.assert_equal(out.numpy(), x_in + a)
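# share_params points each module at mod_shared's parameter storage instead
# of allocating its own; deleting mod_shared below checks that the shared
# storage stays alive as long as any sharing module still references it.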
del mod_shared
for mod in mods:
mod.run(y=a)
out = mod.get_output(0, tvm.nd.empty((1, 10)))
np.testing.assert_equal(out.numpy(), x_in + a)
del mod
check_verify()
check_remote(rpc.Server("127.0.0.1"))
check_sharing()
def test_load_unexpected_params():
mod = tvm.IRModule()
params = {}
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1, 10))
z = relay.add(x, y)
mod["main"] = relay.Function([x, y], z)
graph_module = relay.build(mod, target="llvm", params=params)
rt_mod = tvm.contrib.graph_executor.create(
graph_module.get_graph_json(), graph_module.get_lib(), tvm.cpu(0)
)
new_params = graph_module.get_params()
new_params.update({"y_unknown": np.ones((1,)).astype("float32")})
rt_mod.load_params(runtime.save_param_dict(new_params))
if __name__ == "__main__":
test_graph_simple()
test_load_unexpected_params()
import json
import os
import re
import sys
import time
import pytest
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib import utils, graph_executor
from tvm.contrib.cuda_graph import cuda_graph_executor
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
@tvm.testing.requires_cudagraph
def test_graph_simple():
n = 32
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=8)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
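# n = 32 split by factor 8 yields 4 iterations of xo (bound to CUDA blocks)
# and 8 iterations of xi (bound to threads within a block), so the kernel
# launches as 4 blocks of 8 threads.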
node0 = {"op": "null", "name": "x", "inputs": []}
node1 = {
"op": "tvm_op",
"name": "add",
"inputs": [[0, 0, 0]],
"attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"},
}
nodes = [node0, node1]
arg_nodes = [0]
node_row_ptr = [0, 1, 2]
outputs = [[1, 0, 0]]
shape = (n,)
attrs = {
"shape": ["list_shape", [shape, shape]],
"dltype": ["list_str", ["float32", "float32"]],
"storage_id": ["list_int", [0, 1]],
}
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": outputs,
"attrs": attrs,
}
graph = json.dumps(graph)
def check_verify():
mlib = tvm.build(s, [A, B], "cuda", name="myadd")
dev = tvm.cuda(0)
try:
mod = cuda_graph_executor.create(graph, mlib, dev)
except ValueError:
return
for i in range(3):
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.run(x=a)
out = mod.get_output(0, tvm.nd.empty((n,)))
np.testing.assert_equal(out.numpy(), a + 1)
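# After a few eager runs, capture the kernel launch sequence into a CUDA
# graph and replay it; the replayed run must produce the same result as a
# plain mod.run().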
mod.capture_cuda_graph()
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.set_input(x=a)
mod.run_cuda_graph()
out = mod.get_output(0, tvm.nd.empty((n,)))
np.testing.assert_equal(out.numpy(), a + 1)
check_verify()
if __name__ == "__main__":
test_graph_simple()
import json
import os
import re
import sys
import time
from distutils.log import debug
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import rpc, te
from tvm._ffi.base import TVMError
from tvm.contrib import utils
from tvm.contrib.debugger import debug_executor
@pytest.fixture
def n():
return 4
@pytest.fixture
def A(n):
return te.placeholder((n,), name="A")
@pytest.fixture
def B(A):
return te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
@pytest.fixture
def s(B):
return te.create_schedule(B.op)
@pytest.fixture
def mlib(s, A, B):
return tvm.build(s, [A, B], "llvm", name="myadd")
@pytest.fixture
def myadd(mlib):
def _myadd(*args):
to_return = mlib["myadd"](*args)
time.sleep(0.25)
return to_return
return _myadd
@pytest.fixture
def graph():
node0 = {"op": "null", "name": "x", "inputs": []}
node1 = {
"op": "tvm_op",
"name": "add",
"inputs": [[0, 0, 0]],
"attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"},
}
nodes = [node0, node1]
arg_nodes = [0]
node_row_ptr = [0, 1, 2]
outputs = [[1, 0, 0]]
shape = (4,)
attrs = {
"shape": ["list_shape", [shape, shape]],
"dltype": ["list_str", ["float32", "float32"]],
"storage_id": ["list_int", [0, 1]],
}
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": outputs,
"attrs": attrs,
}
graph = json.dumps(graph)
return graph
@tvm.testing.requires_llvm
@tvm.testing.requires_rpc
@pytest.mark.skipif(
tvm.support.libinfo()["USE_PROFILER"] != "ON", reason="TVM was not built with profiler support"
)
def test_end_to_end_graph_simple(graph, n, A, B, s, myadd):
def check_verify():
mlib_proxy = tvm.support.FrontendTestModule()
mlib_proxy["myadd"] = myadd
mod = debug_executor.create(graph, mlib_proxy, tvm.cpu(0))
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.set_input(x=a)
directory = mod._dump_path
assert os.path.exists(directory)
GRAPH_DUMP_FILE_NAME = "_tvmdbg_graph_dump.json"
assert len(os.listdir(directory)) == 1
graph_dump_path = os.path.join(directory, GRAPH_DUMP_FILE_NAME)
assert os.path.exists(graph_dump_path)
with open(graph_dump_path) as graph_f:
dumped_graph = json.load(graph_f)
assert isinstance(dumped_graph, dict)
for k in ("nodes", "arg_nodes", "node_row_ptr", "heads", "attrs"):
assert k in dumped_graph, f"key {k} not in dumped graph {graph!r}"
mod.run()
assert len(os.listdir(directory)) > 1
debug_lines = mod.debug_datum.get_debug_result().split("\n")
def split_debug_line(i):
to_return = re.split(r" [ ]*", debug_lines[i])
assert to_return[-1] == ""
to_return = to_return[:-1]
return to_return
assert split_debug_line(0) == [
"Node Name",
"Ops",
"Time(us)",
"Time(%)",
"Shape",
"Inputs",
"Outputs",
"Measurements(us)",
]
myadd_lines = split_debug_line(2)
assert myadd_lines[0] == "add"
assert myadd_lines[1] == "myadd"
runtime_sec = float(myadd_lines[2]) / 1e6
assert runtime_sec > 0.25 and runtime_sec < 0.25 * 1000
total_lines = split_debug_line(3)
assert total_lines[0] == "Total_time"
assert total_lines[2] == myadd_lines[2]
CHROME_TRACE_FILE_NAME = "_tvmdbg_execution_trace.json"
assert os.path.exists(os.path.join(directory, CHROME_TRACE_FILE_NAME))
with open(os.path.join(directory, CHROME_TRACE_FILE_NAME)) as f:
trace = json.load(f)
assert trace["displayTimeUnit"] == "ns"
events = trace["traceEvents"]
assert len(events) == 4
assert all(event["ph"] in ("B", "E") for event in events)
assert all(event["pid"] == 1 for event in events)
assert all(event["tid"] == 1 for event in events)
assert all(event["name"] == "x" for event in events[:2])
assert all(event["name |
"] == "add" for event in events[2:])
assert events[0]["ts"] == 0
assert events[0]["ph"] == "B"
out = mod.get_output(0, tvm.nd.empty((n,)))
np.testing.assert_equal(out.numpy(), a + 1)
mod.exit()
assert not os.path.exists(directory)
def check_remote(server):
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
remote = rpc.connect(server.host, server.port)
temp = utils.tempdir()
dev = remote.cpu(0)
path_dso = temp.relpath("dev_lib.so")
mlib.export_library(path_dso)
remote.upload(path_dso)
mlib = remote.load_module("dev_lib.so")
try:
mod = debug_executor.create(graph, mlib, remote.cpu(0))
except ValueError:
print("Skip because debug runtime not enabled")
return
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.run(x=tvm.nd.array(a, dev))
out = tvm.nd.empty((n,), device=dev)
out = mod.get_output(0, out)
np.testing.assert_equal(out.numpy(), a + 1)
check_verify()
check_remote(rpc.Server("127.0.0.1"))
@tvm.testing.requires_llvm
@pytest.mark.skipif(
tvm.support.libinfo()["USE_PROFILER"] != "ON", reason="TVM was not built with profiler support"
)
def test_run_single_node(graph, n, A, myadd):
mlib_proxy = tvm.support.FrontendTestModule()
mlib_proxy["myadd"] = myadd
mod: debug_executor.GraphModuleDebug = debug_executor.create(graph, mlib_proxy, tvm.cpu(0))
a = np.random.uniform(size=(n,)).astype(A.dtype)
mod.set_input(x=a)
assert len(mod.debug_datum.get_graph_nodes()) == 2
assert mod.debug_datum.get_graph_nodes()[0]["op"] == "param"
assert mod.debug_datum.get_graph_nodes()[1]["op"] == "myadd"
assert mod.run_individual_node(0, number=1).mean == 0
repeat_1_result = mod.run_individual_node(1, repeat=1)
assert repeat_1_result.mean > 0
repeat_3_results = mod.run_individual_node(1, repeat=3)
assert sum(repeat_3_results.results) > sum(repeat_1_result.results)
assert len(mod.run_individual_node(1, repeat=10).results) == 10
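# run_individual_node benchmarks only the node at the given index: "number"
# is runs per measurement, "repeat" is how many measurements are returned
# (hence len(results) == repeat), and min_repeat_ms keeps re-running until
# each measurement spans at least that much wall time.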
start = time.time()
mod.run_individual_node(1, min_repeat_ms=500)
end = time.time()
elapsed_time_in_seconds = end - start
assert elapsed_time_in_seconds >= 0.5
start = time.time()
mod.run_individual_node(1, repeat=2, min_repeat_ms=500, cooldown_interval_ms=1000)
end = time.time()
elapsed_time_in_seconds_with_def_rep = end - start
assert elapsed_time_in_seconds_with_def_rep >= 3
start = time.time()
mod.run_individual_node(
1, repeat=2, min_repeat_ms=500, cooldown_interval_ms=1000, repeats_to_cooldown=2
)
end = time.time()
elapsed_time_in_seconds_with_rep_2 = end - start
assert elapsed_time_in_seconds_with_rep_2 >= 2 and (
elapsed_time_in_seconds_with_rep_2 < elapsed_time_in_seconds_with_def_rep
)
with pytest.raises(TVMError):
mod.run_individual_node(2)
if __name__ == "__main__":
tvm.testing.main()
"""Unit tests for heterogeneous runtime"""
import json
import numpy as np
import tvm
from tvm import te
from tvm.contrib import graph_executor, utils
from tvm import topi
def get_simplex_graph(host_dev_type, device_dev_type):
r""" Return the hand-crafted json object where only one copy node is
inserted. This node copies data from the target device to cpu.
The network is constructed as following:
A B
\ /
elemwise_add (gpu)
\
copy C
\ /
elemwise_sub (cpu)
Parameters
----------
host_dev_type : int
The device type of the host processor, e.g. cpu.
device_dev_type : int
The device type of the device processor, e.g. gpu, opencl, etc.
Returns
-------
json : json
A json encoded object.
"""
var_a = {"op": "null", "name": "A", "inputs": []}
var_b = {"op": "null", "name": "B", "inputs": []}
elemwise_add = {
"op": "tvm_op",
"name": "elemwise_add",
"attrs": {
"flatten_data": "1",
"func_name": "elemwise_add",
"num_inputs": "2",
"num_outputs": "1",
},
"inputs": [[0, 0, 0], [1, 0, 0]],
}
copy = {
"op": "tvm_op",
"name": "__copy_add_to_sub",
"attrs": {
"flatten_data": "0",
"func_name": "__copy",
"num_inputs": "1",
"num_outputs": "1",
},
"inputs": [[2, 0, 0]],
}
var_c = {"op": "null", "name": "C", "inputs": []}
elemwise_sub = {
"op": "tvm_op",
"name": "elemwise_sub",
"attrs": {
"flatten_data": "0",
"func_name": "elemwise_sub",
"num_inputs": "2",
"num_outputs": "1",
},
"inputs": [[3, 0, 0], [4, 0, 0]],
}
nodes = [var_a, var_b, elemwise_add, copy, var_c, elemwise_sub]
arg_nodes = [0, 1, 4]
node_row_ptr = [0, 1, 2, 3, 4, 5, 6]
heads = [[5, 0, 0]]
shape = (4,)
attrs = {
"storage_id": ["list_int", [3, 4, 0, 1, 5, 2]],
"shape": ["l |
ist_shape", [shape, shape, shape, shape, shape, shape]],
"device_index": [
"list_int",
[
device_dev_type,
device_dev_type,
device_dev_type,
host_dev_type,
host_dev_type,
host_dev_type,
],
],
"dtype": ["list_int", [0, 0, 0, 0, 0, 0]],
"dltype": ["list_str", ["float32", "float32", "float32", "float32", "float32", "float32"]],
}
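# "device_index" maps each node, in node order (A, B, add, copy, C, sub), to
# a device type: A, B, and elemwise_add are placed on the target device,
# while the copy output, C, and elemwise_sub live on the host, so exactly
# one device-to-host transfer is exercised.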
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": heads,
"attrs": attrs,
}
return json.dumps(graph)
def test_simplex_data_transferring():
r"""
Test the heterogeneous execution of a simple network where data is
transferred from the target device to the host processor at runtime.
The host processor is always assumed to be cpu, and the device varies.
"""
host = "cpu"
target_host = "llvm"
host_dev = tvm.device(host)
if not tvm.runtime.enabled(target_host):
print("Skip test because llvm is not enabled.")
return
def check_device(device, target_device):
if not tvm.runtime.enabled(target_device):
print("Skip test because {} is not enabled.".format(target_device))
return
device_dev = tvm.device(device)
graph = get_simplex_graph(host_dev.device_type, device_dev.device_type)
shape = (4,)
tensor_a = te.placeholder(shape, name="A")
tensor_b = te.placeholder(shape, name="B")
elemwise_add = te.compute(
shape, lambda *i: tensor_a(*i) + tensor_b(*i), name="elemwise_add"
)
target = topi.cpp.TEST_create_target(device)
schedule_add = topi.cpp.cuda.schedule_injective(target, [elemwise_add])
lower_add = tvm.lower(schedule_add, [tensor_a, tensor_b, elemwise_add], name="elemwise_add")
tensor_copy = te.placeholder(shape, name="__ |
copy")
tensor_c = te.placeholder(shape, name="C")
elemwise_sub = te.compute(
shape, lambda *i: tensor_copy(*i) - tensor_c(*i), name="elemwise_sub"
)
schedule_sub = te.create_schedule(elemwise_sub.op)
lower_sub = tvm.lower(
schedule_sub, [tensor_copy, tensor_c, elemwise_sub], name="elemwise_sub"
)
target_flist = {target_device: lower_add, target_host: lower_sub}
target = tvm.target.Target(target, target_host)
mhost = tvm.build(target_flist, target=target)
dev = [host_dev, device_dev]
mod = graph_executor.create(graph, mhost, dev)
params = {}
params["A"] = tensor_a = np.random.uniform(size=shape).astype(tensor_a.dtype)
params["B"] = tensor_b = np.random.uniform(size=shape).astype(tensor_b.dtype)
params["C"] = tensor_c = np.random.uniform(size=shape).astype(tensor_c.dtype)
mod.set_input(**params)
mod.run()
out = mod.get_output(0, tvm.nd.empty(shape))
np.testing.assert_equal(out.numpy(), (tensor_a + tensor_b) - tensor_c)
dev_tar = {"cuda": "cuda", "opencl": "opencl"}
for device, target in dev_tar.items():
with tvm.target.Target(device):
check_device(device, target)
def get_duplex_graph(host_dev_type, device_dev_type):
r""" Return the hand-crafted json object where two copy nodes are inserted.
Data transferring happens back-and-forth between the target device and CPU.
The network is constructed as following:
A B
\ /
elemwise_add (gpu)
\
copy C
\ /
elemwise_sub (cpu)
\
copy D
\ /
elemwise_add (gpu)
Parameters
----------
host_dev_type : int
The device type of the host processor, e.g. cpu.
device_dev_type : int
The device type of the device processor, e.g. gpu, opencl, etc.
Returns
-------
json : json
A json encoded object.
"""
var_a = {"op": "null", "name": "A", "inputs": []}
var_b = {"op": "null", "name": "B", "inputs": []}
elemwise_add0 = {
"op": "tvm_op",
"name": "elemwise_add0",
"attrs": {
"flatten_data": "1",
"func_name": "elemwise_add0",
"num_inputs": "2",
"num_outputs": "1",
},
"inputs": [[0, 0, 0], [1, 0, 0]],
}
copy_add_sub = {
"op": "tvm_op",
"name": "__copy_add_to_sub",
"attrs": {
"flatten_data": "0",
"func_name": "__copy",
"num_inputs": "1",
"num_outputs": "1",
},
"inputs": [[2, 0, 0]],
}
var_c = {"op": "null", "name": "C", "inputs": []}
elemwise_sub = {
"op": "tvm_op",
"name": "elemwise_sub",
"attrs": {
"flatten_data": "0",
"func_name": "elemwise_sub",
"num_inputs": "2",
"num_outputs": "1",
},
"inputs": [[3, 0, 0], [4, 0, 0]],
}
copy_sub_add = {
"op": "tvm_op",
"name": "__copy_sub_to_add",
"attrs": {
"flatten_data": "0",
"func_name": "__copy",
"num_inputs": "1",
"num_outputs": "1",
},
"inputs": [[5, 0, 0]],
}
var_d = {"op": "null", "name": "D", "inputs": []}
elemwise_add1 = {
"op": "tvm_op",
"name": "elemwise_add1",
"attrs": {
"flatten_data": "0",
"func_name": "elemwise_add1",
"num_inputs": "2",
"num_outputs": "1",
},
"inputs": [[6, 0, 0], [7, 0, 0]],
}
nodes = [
var_a,
var_b,
elemwise_add0,
copy_add_sub,
var_c,
elemwise_sub,
copy_sub_add,
var_d,
elemwise_add1,
]
arg_nodes = [0, 1, 4, 7]
node_row_ptr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
heads = [[8, 0, 0]]
shape = (4,)
attrs = {
"storage_id": ["list_int", [4, 5, 0, 1, 6, 2, 0, 7, 3]],
"shape": ["list_shape", [shape, shape, shape, shape, shape, shape, shape, shape, shape]],
"device_index": [
"list_int",
[
device_dev_type,
device_dev_type,
device_dev_type,
host_dev_type,
host_dev_type,
host_dev_type,
device_dev_type,
device_dev_type,
device_dev_type,
],
],
"dtype": ["list_int", [0, 0, 0, 0, 0, 0, 0, 0, 0]],
"dltype": [
"list_str",
[
"float32",
"float32",
"float32",
"float32",
"float32",
"float32",
"float32",
"float32",
"float32",
],
],
}
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": heads,
"attrs": attrs,
}
return json.dumps(graph)
def test_duplex_data_transferring():
r"""
Test the heterogeneous execution of a simple network where data is
transferred back and forth between the target device and the host
processor.
The host processor is always assumed to be cpu, and the target device
varies.
"""
host = "cpu"
target_host = "llvm"
host_dev = tvm.device(host)
if not tvm.runtime.enabled(target_host):
print("Skip test because llvm is not enabled.")
return
def check_device(device, target_device):
if not tvm.runtime.enabled(target_device):
print("Skip test because {} is not enabled.".format(target_device))
return
device_dev = tvm.device(device)
graph = get_duplex_graph(host_dev.device_type, device_dev.device_type)
shape = (4,)
copy_add_sub = te.placeholder(shape, name="__copy0")
copy_sub_add = te.placeholder(shape, name="__copy1")
tensor_a = te.placeholder(shape, name="A")
tensor_b = te.placeholder(shape, name="B")
tensor_d = te.placeholder(shape, name="D")
elemwise_add0 = te.compute(
shape, lambda *i: tensor_a(*i) + tensor_b(*i), name="elemwise_add0"
)
elemwise_add1 = te.compute(
shape, lambda *i: copy_sub_add(*i) + tensor_d(*i), name="elemwise_add1"
)
target = topi.cpp.TEST_create_target(device)
add_schedule0 = topi.cpp.cuda.schedule_injective(target, [elemwise_add0])
lower_add0 = tvm.lower(
add_schedule0, [tensor_a, tensor_b, elemwise_add0], name="elemwise_add0"
)
add_schedule1 = topi.cpp.cuda.schedule_injective(target, [elemwise_add1])
lower_add1 = tvm.lower(
add_schedule1, [tensor_d, copy_sub_add, elemwise_add1], name="elemwise_add1"
)
tensor_c = te.placeholder(shape, name="C")
elemwise_sub = te.compute(
shape, lambda *i: copy_add_sub(*i) - tensor_c(*i), name="elemwise_sub"
)
sub_schedule = te.create_schedule(elemwise_sub.op)
lower_sub = tvm.lower(
sub_schedule, [copy_add_sub, tensor_c, elemwise_sub], name="elemwise_sub"
)
lower_add0.update(lower_add1)
target_flist = {target_device: lower_add0, target_host: lower_sub}
target = tvm.target.Target(target, target_host)
mhost = tvm.build(target_flist, target=target)
dev = [host_dev, device_dev]
params = {}
params["A"] = tensor_a = np.random.uniform(size=shape).astype(tensor_a.dtype)
params["B"] = tensor_b = np.random.uniform(size=shape).astype(tensor_b.dtype)
params["C"] = tensor_c = np.random.uniform(size=shape).astype(tensor_c.dtype)
params["D"] = tensor_d = np.random.uniform(size=shape).as |
type(tensor_d.dtype)
def check_verify():
mod = graph_executor.create(graph, mhost, dev)
mod.set_input(**params)
mod.run()
out = mod.get_output(0, tvm.nd.empty(shape))
np.testing.assert_equal(out.numpy(), tensor_a + tensor_b - tensor_c + tensor_d)
def check_load_module():
temp = utils.tempdir()
path_lib = temp.relpath("deploy.so")
mhost.export_library(path_lib)
with open(temp.relpath("deploy.json"), "w") as out_file:
out_file.write(graph)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_graph = open(temp.relpath("deploy.json")).read()
mod = graph_executor.create(loaded_graph, loaded_lib, dev)
mod.set_input(**params)
mod.run()
out = mod.get_output(0, tvm.nd.empty(shape))
np.testing.assert_equal(out.numpy(), tensor_a + tensor_b - tensor_c + tensor_d)
check_verify()
check_load_module()
dev_tar = {"cuda": "cuda", "opencl": "opencl"}
for device, target in dev_tar.items():
with tvm.target.Target(device):
check_device(device, target)
if __name__ == "__main__":
test_simplex_data_transferring()
test_duplex_data_transferring()
import time
import ctypes
import tvm
from tvm import te
from tvm.contrib.utils import tempdir
from tvm.runtime.module import BenchmarkResult
def test_min_repeat_ms():
tmp = tempdir()
filename = tmp.relpath("log")
@tvm.register_func
def my_debug(filename):
"""one call lasts for 100 ms and writes one character to a file"""
time.sleep(0.1)
with open(filename, "a") as fout:
fout.write("c")
X = te.compute((), lambda: tvm.tir.call_packed("my_debug", filename))
s = te.create_schedule(X.op)
func = tvm.build(s, [X])
x = tvm.nd.empty((), dtype="int32")
ftimer = func.time_evaluator(func.entry_name, tvm.cpu(), number=1, repeat=1)
ftimer(x)
with open(filename, "r") as fin:
ct = len(fin.readline())
assert ct == 2
ftimer = func.time_evaluator(func.entry_name, tvm.cpu(), number=1, repeat=1, min_repeat_ms=1000)
ftimer(x)
with open(filename, "r") as fin:
ct = len(fin.readline())
assert ct > 10 + 2
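# The counts above follow because each my_debug call sleeps 100 ms and
# appends one character: with number=1, repeat=1 the file holds 2 characters
# (one warm-up call plus one measured call), and with min_repeat_ms=1000 the
# evaluator keeps re-running until the measured loop spans at least 1 s,
# i.e. at least 10 more calls on top of the earlier ones.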
def test_benchmark_result():
r = BenchmarkResult([1, 2, 2, 5])
assert r.mean == 2.5
assert r.median == 2.0
assert r.min == 1
assert r.max == 5
assert r.std == 1.5
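# Sanity check of the arithmetic: for [1, 2, 2, 5] the mean is 10/4 = 2.5,
# the median is 2.0, and the population std is
# sqrt((2.25 + 0.25 + 0.25 + 6.25) / 4) = sqrt(2.25) = 1.5, matching the
# asserts above.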
if __name__ == "__main__":
test_min_repeat_ms()
test_benchmark_result()
import numpy as np
import os
from tvm import relay, runtime
from tvm.relay import testing
import tvm
from tvm.contrib import graph_executor
from tvm.contrib.debugger import debug_executor
from tvm.contrib.cuda_graph import cuda_graph_executor
import tvm.testing
def input_shape(mod):
return [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
def verify(data):
if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
dev = tvm.cpu()
module = graph_executor.create(graph, lib, dev)
module.set_input("data", data)
module.set_input(**graph_params)
module.run()
out = module.get_output(0).numpy()
return out
@tvm.testing.requires_llvm
def test_legacy_compatibility():
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cpu()
module = graph_executor.create(graph, lib, dev)
module.set_input("data", data)
module.set_input(**graph_params)
module.run()
out = module.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu():
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cpu()
gmod = complied_graph_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(complied_graph_lib["default"](dev))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu_get_graph_json():
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
file_name = "deploy_lib.so"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
json = loaded_lib["get_graph_json"]()
assert isinstance(json, str)
assert json.find("tvmgen_default_fused_nn_softmax_add") > -1
@tvm.testing.requires_llvm
def test_cpu_get_graph_params_run():
mod, params = relay.testing.synthetic.get_workload()
with tvm.transform.PassContext(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cpu()
from tvm.contrib import utils
temp = utils.tempdir()
file_name = "deploy_lib.so"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_params = loaded_lib["get_graph_params"]()
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
gmod.set_input(key="data", value=data, **loaded_params)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu_get_graph_params_compare():
from tvm.relay.testing.init import create_workload, Constant
inp_shape = (1, 3, 24, 12)
dtype = "float32"
data = relay.var("data", shape=inp_shape, dtype=dtype)
conv_shape = [inp_shape[1], inp_shape[1], 3, 3]
conv = relay.nn.conv2d(
data,
relay.var("conv_weight", shape=conv_shape, dtype=dtype),
padding=1,
kernel_size=3,
)
args = relay.analysis.free_vars(conv)
func = relay.Function(args, conv)
mod, params = create_workload(func, initializer=Constant())
with tvm.transform.PassContext(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
file_name = "deploy_lib.so"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_params = loaded_lib["get_graph_params"]()
tvm.testing.assert_allclose(
params["conv_weight"].numpy(), loaded_params["p0"].numpy()[0][0], atol=1e-5
)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_gpu():
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cuda()
gmod = complied_graph_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(complied_graph_lib["default"](dev))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.uses_gpu
def test_mod_export():
def verify_cpu_export(obj_format):
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
def setup_gmod():
loaded_lib = tvm.runtime.load_module(path_lib)
dev = tvm.cpu(0)
return loaded_lib["default"](dev)
gmod = setup_gmod()
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(setup_gmod())
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_gpu_export(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
def setup_gmod():
loaded_lib = tvm.runtime.load_module(path_lib)
dev = tvm.cuda()
return loaded_lib["default"](dev)
gmod = setup_gmod()
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(setup_gmod())
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def verify_rpc_cpu_export(obj_format):
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = remote.cpu()
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data, device=dev))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_gpu_export(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
from tvm import rpc
def check_remote(server):
remote = rpc.connect(server.host, server.port)
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = remote.cuda()
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data, device=dev))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
check_remote(rpc.Server("127.0.0.1"))
for obj_format in [".so", ".tar"]:
verify_cpu_export(obj_format)
verify_gpu_export(obj_format)
verify_rpc_cpu_export(obj_format)
verify_rpc_gpu_export(obj_format)
@tvm.testing.requires_llvm
@tvm.testing.uses_gpu
def test_remove_package_params():
def verify_cpu_remove_package_params(obj_format):
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
with open(temp.relpath("deploy_param.params"), "wb") as fo:
fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
loaded_lib = tvm.runtime.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cpu(0)
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
set_input("data", tvm.nd.array(data))
load_params(loaded_params)
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_gpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
with open(temp.relpath("deploy_param.params"), "wb") as fo:
fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
loaded_lib = tvm.runtime.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cuda(0)
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
set_input("data", tvm.nd.array(data))
load_params(loaded_params)
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def verify_rpc_cpu_remove_package_params(obj_format):
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
path_params = temp.relpath("deploy_param.params")
with open(path_params, "wb") as fo:
fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = remote.cpu()
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(path_params, "rb").read())
set_input("data", tvm.nd.array(data, device=dev))
load_params(loaded_params)
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
loaded_params = bytearray(open(path_params, "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_gpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
path_params = temp.relpath("deploy_param.params")
with open(path_params, "wb") as fo:
fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = remote.cuda()
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(path_params, "rb").read())
set_input("data", tvm.nd.array(data, device=dev))
load_params(loaded_params)
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
loaded_params = bytearray(open(path_params, "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
for obj_format in [".so", ".tar"]:
verify_cpu_remove_package_params(obj_format)
verify_gpu_remove_package_params(obj_format)
verify_rpc_cpu_remove_package_params(obj_format)
verify_rpc_gpu_remove_package_params(obj_format)
@tvm.testing.requires_llvm
def test_debug_graph_executor():
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cpu()
try:
gmod = complied_graph_lib["debug_create"]("default", dev)
except:
print("Skip because debug graph_executor not enabled")
return
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
debug_g_mod = debug_executor.GraphModuleDebug(
complied_graph_lib["debug_create"]("default", dev),
[dev],
complied_graph_lib.get_graph_json(),
None,
)
debug_g_mod.set_input("data", data)
debug_g_mod.run()
out = debug_g_mod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_cudagraph
def test_cuda_graph_executor():
mod, params = relay.testing.synthetic.get_workload()
with tvm.transform.PassContext(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = tvm.cuda()
try:
gmod = complied_graph_lib["cuda_graph_create"](dev)
except:
print("Skip because cuda_graph not enabled")
return
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
cu_gmod = cuda_graph_executor.GraphModuleCudaGraph(gmod)
cu_gmod.set_input("data", data)
cu_gmod.run()
out = cu_gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def test_multiple_imported_modules():
def make_func(symbol):
n = tvm.te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype="float32")
i = tvm.te.var("i")
stmt = tvm.tir.For(
i,
0,
n - 1,
tvm.tir.ForKind.SERIAL,
tvm.tir.BufferStore(Ab, tvm.tir.BufferLoad(Ab, [i]) + 1, [i + 1]),
)
return tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", symbol)
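# The TIR function built above is, in effect:
#   for i in range(n - 1):
#       Ab[i + 1] = Ab[i] + 1
# i.e. a serial scan over the buffer, with a distinct global_symbol per module.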
def make_module(mod):
mod = tvm.IRModule(mod)
mod = tvm.driver.build(mod, target="llvm")
return mod
module_main = make_module({"main": make_func("main")})
module_a = make_module({"func_a": make_f |
unc("func_a")})
module_b = make_module({"func_b": make_func("func_b")})
module_main.import_module(module_a)
module_main.import_module(module_b)
module_main.get_function("func_a", query_imports=True)
module_main.get_function("func_b", query_imports=True)
def test_num_threads():
reported = tvm.runtime.num_threads()
env_threads = os.getenv("TVM_NUM_THREADS")
omp_env_threads = os.getenv("OMP_NUM_THREADS")
if env_threads is not None:
assert reported == int(env_threads)
elif omp_env_threads is not None:
assert reported == int(omp_env_threads)
else:
hardware_threads = os.cpu_count()
assert reported == hardware_threads or reported == hardware_threads // 2
if __name__ == "__main__":
test_legacy_compatibility()
test_cpu()
test_gpu()
test_mod_export()
test_remove_package_params()
test_debug_graph_executor()
test_multiple_imported_modules()
test_cpu_get_graph_json()
test_cpu_get_graph_params_run()
test_cpu_get_graph_params_compare()
from tvm import relay
from tvm.relay import testing
import tvm
from tvm import te
import tvm.testing
from tvm.contrib import utils
header_file_dir_path = utils.tempdir()
def gen_engine_header():
code = r""" |
class Engine {
};
"""
header_file = header_file_dir_path.relpath("gcc_engine.h")
with open(header_file, "w") as f:
f.write(code)
def generate_engine_module():
code = r"""
extern "C" void gcc_1_(float* gcc_input4, float* gcc_input5,
float* gcc_input6, float* gcc_input7, float* out) {
Engine engine;
}
""" |
import tvm.runtime._ffi_api
gen_engine_header()
csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc", [], None)
return csource_module
@tvm.testing.uses_gpu
def test_mod_export():
def verify_gpu_mod_export(obj_format):
for device in ["llvm", "cuda"]:
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled..." % device)
return
synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload()
synthetic_llvm_mod, synthetic_llvm_params = relay.testing.synthetic.get_workload()
with tvm.transform.PassContext(opt_level=3):
_, synthetic_gpu_lib, _ = relay.build_module.build(
synthetic_mod, "cuda", params=synthetic_params, mod_name="cudalib"
)
_, synthetic_llvm_cpu_lib, _ = relay.build_module.build(
synthetic_llvm_mod, "llvm", params=synthetic_llvm_params, mod_name="llvmlib"
)
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
synthetic_gpu_lib.import_module(synthetic_llvm_cpu_lib)
synthetic_gpu_lib.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert loaded_lib.imported_modules[0].type_key == "cuda"
assert len(loaded_lib.imported_modules) == 1
def verify_multi_dso_mod_export(obj_format):
for device in ["llvm"]:
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled..." % device)
return
A = te.placeholder((1024,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
mod0 = tvm.build(s, [A, B], "llvm", name="myadd0")
mod1 = tvm.build(s, [A, B], "llvm", name="myadd1")
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
mod0.import_module(mod1)
mod0.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert len(loaded_lib.imported_modules) == 0
def verify_json_import_dso(obj_format):
for device in ["llvm"]:
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled..." % device)
return
subgraph_json = (
"json_rt_0\n"
+ "input 0 10 10\n"
+ "input 1 10 10\n"
+ "input 2 10 10\n"
+ "input 3 10 10\n"
+ "add 4 inputs: 0 1 shape: 10 10\n"
+ "sub 5 inputs: 4 2 shape: 10 10\n"
+ "mul 6 inputs: 5 3 shape: 10 10\n"
+ "json_rt_1\n"
+ "input 0 10 10\n"
+ "input 1 10 10\n"
+ "input 2 10 10\n"
+ "input 3 10 10\n"
+ "add 4 inputs: 0 1 shape: 10 10\n"
+ "sub 5 inputs: 4 2 shape: 10 10\n"
+ "mul 6 inputs: 5 3 shape: 10 10"
)
temp = utils.tempdir()
subgraph_path = temp.relpath("subgraph.examplejson")
with open(subgraph_path, "w") as f:
f.write(subgraph_json)
A = te.placeholder((1024,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm", name="myadd")
try:
ext_lib = tvm.runtime.load_module(subgraph_path, "examplejson")
except:
print("skip because Loader of examplejson is not presented")
return
ext_lib.import_module(f)
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
ext_lib.export_library(path_lib)
lib = tvm.runtime.load_module(path_lib)
assert lib.type_key == "examplejson"
assert lib.imported_modules[0].type_key == "library"
def verify_multi_c_mod_export():
from shutil import which
if which("gcc") is None:
print("Skip test because gcc is not available.")
return
for device in ["llvm"]:
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled..." % device)
return
synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload()
with tvm.transform.PassContext(opt_level=3):
_, synthetic_cpu_lib, _ = relay.build_module.build(
synthetic_mod, "llvm", params=synthetic_params
)
A = te.placeholder((1024,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "c", name="myadd")
engine_module = generate_engine_module()
temp = utils.tempdir()
file_name = "deploy_lib.so"
path_lib = temp.relpath(file_name)
synthetic_cpu_lib.import_module(f)
synthetic_cpu_lib.import_module(engine_module)
kwargs = {"options": ["-O2", "-std=c++17", "-I" + header_file_dir_path.relpath("")]}
synthetic_cpu_lib.export_library(path_lib, fcompile=False, **kwargs)
loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert len(loaded_lib.imported_modules) == 0
for obj_format in [".so", ".tar"]:
verify_gpu_mod_export(obj_format)
verify_multi_dso_mod_export(obj_format)
verify_json_import_dso(obj_format)
verify_multi_c_mod_export()
@tvm.testing.requires_llvm
def test_import_static_library():
A = te.placeholder((1024,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
mod0 = tvm.build(s, [A, B], "llvm", name="myadd0")
mod1 = tvm.build(s, [A, B], "llvm", name="myadd1")
assert mod0.implements_function("myadd0")
assert mod1.implements_function("myadd1")
assert mod1.is_dso_exportable
temp = utils.tempdir()
mod1_o_path = temp.relpath("mod1.o")
mod1.save(mod1_o_path)
mod1_o = tvm.runtime.load_static_library(mod1_o_path, ["myadd1"])
assert mod1_o.implements_function("myadd1")
assert mod1_o.is_dso_exportable
mod0.import_module(mod1_o)
mod0_dso_path = temp.relpath("mod0.so")
mod0.export_library(mod0_dso_path)
loaded_lib = tvm.runtime.load_module(mod0_dso_path)
assert loaded_lib.type_key == "library"
assert len(loaded_lib.imported_modules) == 0
assert loaded_lib.implements_function("myadd0")
assert loaded_lib.get_function("myadd0")
assert loaded_lib.implements_function("myadd1")
assert loaded_lib.get_function("myadd1")
assert not loaded_lib.is_dso_exportable
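# load_static_library wraps the saved object file as a static module: it
# still reports which functions it implements and can be linked into an
# exported library, but once the combined .so is loaded back it is an
# ordinary dynamic library module and no longer DSO-exportable, as asserted
# above.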
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
from tvm.contrib import cc, utils
import ctypes
import sys
import numpy as np
import subprocess
import tvm.testing
from tvm.relay.backend