# NOTE(review): removed dataset-export residue ("text stringlengths 1 2.05k | ---|")
# that was not part of the original Python source.
# NOTE(review): original line was truncated to "import Runtime"; Runtime is
# declared in tvm.relay.backend elsewhere in this repo — confirm against the file.
from tvm.relay.backend import Runtime

# Stand-alone script executed in a subprocess by test_dso_module_load: it must
# load and run the exported module using only the lightweight TVM runtime
# (TVM_USE_RUNTIME_LIB=1, ctypes FFI).  The stray "|" artifacts in the original
# string would have made this generated script a syntax error.
runtime_py = """
import os
import sys

os.environ["TVM_USE_RUNTIME_LIB"] = "1"
os.environ["TVM_FFI"] = "ctypes"
import tvm
from tvm import te
import numpy as np

path_dso = sys.argv[1]
dtype = sys.argv[2]
ff = tvm.runtime.load_module(path_dso)
a = tvm.nd.array(np.zeros(10, dtype=dtype))
ff(a)
np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
print("Finish runtime checking...")
"""
def test_dso_module_load():
    """Save a TIR PrimFunc as .o/.ll/.bc, reload it, and re-check it in a
    runtime-only subprocess via the ``runtime_py`` script."""
    if not tvm.testing.device_enabled("llvm"):
        return
    dtype = "int64"
    temp = utils.tempdir()

    def save_object(names):
        # Build A[i + 1] = A[i] + 1 over a serial loop and save the compiled
        # module under every requested file name/format.
        n = te.size_var("n")
        Ab = tvm.tir.decl_buffer((n,), dtype)
        i = te.var("i")
        stmt = tvm.tir.For(
            i,
            0,
            n - 1,
            tvm.tir.ForKind.SERIAL,
            tvm.tir.BufferStore(Ab, tvm.tir.BufferLoad(Ab, [i]) + 1, [i + 1]),
        )
        mod = tvm.IRModule.from_expr(
            tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "main")
        )
        m = tvm.driver.build(mod, target="llvm")
        for name in names:
            m.save(name)

    path_obj = temp.relpath("test.o")
    path_ll = temp.relpath("test.ll")
    path_bc = temp.relpath("test.bc")
    path_dso = temp.relpath("test.so")
    save_object([path_obj, path_ll, path_bc])
    cc.create_shared(path_dso, [path_obj])

    # Both the shared library and the LLVM IR file must load and compute
    # the same result.
    f1 = tvm.runtime.load_module(path_dso)
    f2 = tvm.runtime.load_module(path_ll)
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    f1(a)
    np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    f2(a)
    np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))

    # Re-check from a subprocess that only has the runtime library.
    path_runtime_py = temp.relpath("runtime.py")
    with open(path_runtime_py, "w") as fo:
        fo.write(runtime_py)
    proc = subprocess.run(
        [sys.executable, path_runtime_py, path_dso, dtype],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    assert proc.returncode == 0, f"{proc.args} exited with {proc.returncode}: {proc.stdout}"
@tvm.testing.requires_gpu
def test_device_module_dump():
    """Export a GPU kernel to a shared library and run it again after reload
    (both as a dso and, on POSIX, as a system lib), plus a stackvm export."""
    n = tvm.runtime.convert(1024)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    # Bind the split axis to GPU block/thread indices.
    num_thread = 8
    bx, tx = s[B].split(B.op.axis[0], factor=num_thread)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        temp = utils.tempdir()
        name = "myadd_%s" % device
        if sys.platform == "darwin" or sys.platform.startswith("linux"):
            # system-lib registration is only exercised on POSIX platforms
            runtime = Runtime("cpp", {"system-lib": True})
            f = tvm.build(s, [A, B], device, "llvm", runtime=runtime, name=name)
        elif sys.platform == "win32":
            f = tvm.build(s, [A, B], device, "llvm", name=name)
        else:
            raise ValueError("Unsupported platform")
        path_dso = temp.relpath("dev_lib.so")
        # also exercises the cross-compiler wrapper
        f.export_library(path_dso, cc.cross_compiler("g++"))
        f1 = tvm.runtime.load_module(path_dso)
        a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
        f1(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        if sys.platform != "win32":
            f2 = tvm.runtime.system_lib()
            f2[name](a, b)
            np.testing.assert_equal(b.numpy(), a.numpy() + 1)

    def check_stackvm(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        temp = utils.tempdir()
        name = "myadd_%s" % device
        f = tvm.build(s, [A, B], device, "stackvm", name=name)
        path_dso = temp.relpath("dev_lib.stackvm")
        f.export_library(path_dso)
        f1 = tvm.runtime.load_module(path_dso)
        a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
        # BUGFIX(review): the original called f(a, b), so the reloaded module
        # f1 was never exercised; call the loaded module instead.
        f1(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

    for device in ["cuda", "vulkan", "opencl", "metal"]:
        check_device(device)
        check_stackvm(device)
def test_combine_module_llvm():
    """Test combine multiple module into one shared lib."""
    nn = 12
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)

    def check_llvm():
        # Two objects linked into one .so, looked up by symbol name.
        dev = tvm.cpu(0)
        if not tvm.testing.device_enabled("llvm"):
            print("Skip because llvm is not enabled")
            return
        temp = utils.tempdir()
        fadd1 = tvm.build(s, [A, B], "llvm", name="myadd1")
        fadd2 = tvm.build(s, [A, B], "llvm", name="myadd2")
        path1 = temp.relpath("myadd1.o")
        path2 = temp.relpath("myadd2.o")
        path_dso = temp.relpath("mylib.so")
        fadd1.save(path1)
        fadd2.save(path2)
        cc.create_shared(path_dso, [path1, path2])
        m = tvm.runtime.load_module(path_dso)
        fadd1 = m["myadd1"]
        fadd2 = m["myadd2"]
        a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev)
        fadd1(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        fadd2(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

    def check_system_lib():
        # Same two objects, but registered into the process-wide system lib
        # by dlopen'ing the combined shared object.
        dev = tvm.cpu(0)
        if not tvm.testing.device_enabled("llvm"):
            print("Skip because llvm is not enabled")
            return
        temp = utils.tempdir()
        runtime = Runtime("cpp", {"system-lib": True})
        fadd1 = tvm.build(s, [A, B], "llvm", runtime=runtime, name="myadd1")
        fadd2 = tvm.build(s, [A, B], "llvm", runtime=runtime, name="myadd2")
        path1 = temp.relpath("myadd1.o")
        path2 = temp.relpath("myadd2.o")
        path_dso = temp.relpath("mylib.so")
        fadd1.save(path1)
        fadd2.save(path2)
        cc.create_shared(path_dso, [path1, path2])
        # dlopen the lib so its startup code registers the symbols
        ctypes.CDLL(path_dso)
        mm = tvm.runtime.system_lib()
        a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev)
        mm["myadd1"](a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        mm["myadd2"](a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

    if sys.platform != "win32":
        check_system_lib()
    check_llvm()
if __name__ == "__main__":
    test_combine_module_llvm()
    test_device_module_dump()
    test_dso_module_load()
# Imports for the profiling tests (stdlib / third-party / tvm groups).
import csv
import json
import os
import platform
from io import StringIO

import numpy as np
import pytest

import tvm.testing
import tvm.utils
from tvm import relay, rpc
from tvm.contrib import utils
from tvm.contrib.debugger import debug_executor
from tvm.relay.testing import mlp
from tvm.runtime import profiler_vm
from tvm.runtime.profiling import Report
from tvm.script import tir as T
def read_csv(report):
    """Parse ``report.csv()`` into a dict mapping column header -> list of
    string values (column-major)."""
    f = StringIO(report.csv())
    headers = []
    rows = []
    reader = csv.reader(f, delimiter=",")
    in_header = True
    for row in reader:
        if in_header:
            # First row carries the column names; allocate one list per column.
            headers = row
            in_header = False
            rows = [[] for _ in headers]
        else:
            for i, value in enumerate(row):
                rows[i].append(value)
    return dict(zip(headers, rows))
@pytest.mark.skipif(not profiler_vm.enabled(), reason="VM Profiler not enabled")
@tvm.testing.skip_if_wheel_test
@tvm.testing.parametrize_targets
def test_vm(target, dev):
    """Profile a dynamic-shape add through the VM profiler and sanity-check
    the generated report and its CSV form."""
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), relay.Any()), dtype=dtype)
    y = relay.var("y", shape=(relay.Any(), relay.Any()), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.add(x, y))
    exe = relay.vm.compile(mod, target)
    vm = profiler_vm.VirtualMachineProfiler(exe, dev)

    data = np.random.rand(28, 28).astype("float32")
    report = vm.profile(data, data, func_name="main")
    assert "fused_add" in str(report)
    assert "Total" in str(report)
    assert "AllocTensorReg" in str(report)
    assert "AllocStorage" in str(report)
    assert report.configuration["Executor"] == "VM"

    csv = read_csv(report)
    assert "Hash" in csv.keys()
    # fused kernels must take measurable time; everything else is at least
    # non-negative.
    assert all(
        float(dur) > 0
        for dur, name in zip(csv["Duration (us)"], csv["Name"])
        if name[:5] == "fused"
    )
    assert all(
        float(dur) >= 0
        for dur, name in zip(csv["Duration (us)"], csv["Name"])
        if name[:5] != "fused"
    )
@tvm.testing.parametrize_targets
def test_graph_executor(target, dev):
    """Profile an MLP through the debug graph executor and check the report."""
    mod, params = mlp.get_workload(1)
    exe = relay.build(mod, target, params=params)
    gr = debug_executor.create(exe.get_graph_json(), exe.lib, dev)

    data = np.random.rand(1, 1, 28, 28).astype("float32")
    report = gr.profile(data=data)
    assert "fused_nn_softmax" in str(report)
    assert "Total" in str(report)
    assert "Hash" in str(report)
    assert "Graph" in str(report)
@tvm.testing.parametrize_targets("cuda", "llvm")
@pytest.mark.skipif(
    tvm.get_global_func("runtime.profiling.PAPIMetricCollector", allow_missing=True) is None,
    reason="PAPI profiling not enabled",
)
def test_papi(target, dev):
    """Collect a PAPI hardware metric while profiling an MLP through the VM."""
    target = tvm.target.Target(target)
    # Pick a metric appropriate for the backend.
    if str(target.kind) == "llvm":
        metric = "PAPI_FP_OPS"
    elif str(target.kind) == "cuda":
        metric = "cuda:::event:shared_load:device=0"
    else:
        pytest.skip(f"Target {target.kind} not supported by this test")
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, target, params=params)
    vm = profiler_vm.VirtualMachineProfiler(exe, dev)

    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev)
    report = vm.profile(
        data,
        func_name="main",
        collectors=[tvm.runtime.profiling.PAPIMetricCollector({dev: [metric]})],
    )
    assert metric in str(report)

    csv = read_csv(report)
    assert metric in csv.keys()
    assert any(float(x) > 0 for x in csv[metric])
@tvm.testing.requires_llvm
def test_json():
    """Check that the profiling report serializes to well-formed JSON with the
    expected keys and value types."""
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, "llvm", params=params)
    vm = profiler_vm.VirtualMachineProfiler(exe, tvm.cpu())
    data = np.random.rand(1, 1, 28, 28).astype("float32")
    report = vm.profile(data, func_name="main")
    parsed = json.loads(report.json())
    assert "device_metrics" in parsed
    assert "calls" in parsed
    assert "configuration" in parsed
    assert "Duration (us)" in parsed["calls"][0]
    assert "microseconds" in parsed["calls"][0]["Duration (us)"]
    assert len(parsed["calls"]) > 0
    for call in parsed["calls"]:
        assert isinstance(call["Name"]["string"], str)
        assert isinstance(call["Count"]["count"], int)
        assert isinstance(call["Duration (us)"]["microseconds"], float)
@tvm.testing.requires_llvm
def test_rpc_vm():
    """Profile a VM executable over a local RPC round-trip."""
    server = rpc.Server(key="profiling")
    remote = rpc.connect("127.0.0.1", server.port, key="profiling")
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, "llvm", params=params)
    temp = utils.tempdir()
    path = temp.relpath("lib.tar")
    exe.mod.export_library(path)
    remote.upload(path)
    rexec = remote.load_module("lib.tar")
    vm = profiler_vm.VirtualMachineProfiler(rexec, remote.cpu())
    report = vm.profile(tvm.nd.array(np.ones((1, 1, 28, 28), dtype="float32"), device=remote.cpu()))
    assert len(report.calls) > 0
def test_rpc_graph():
    """Profile a graph-executor build over a local RPC round-trip."""
    server = rpc.Server(key="profiling")
    remote = rpc.connect("127.0.0.1", server.port, key="profiling")
    mod, params = mlp.get_workload(1)
    exe = relay.build(mod, "llvm", params=params)
    temp = utils.tempdir()
    path = temp.relpath("lib.tar")
    exe.export_library(path)
    remote.upload(path)
    rexec = remote.load_module("lib.tar")
    gr = debug_executor.create(exe.get_graph_json(), rexec, remote.cpu())
    data = np.random.rand(1, 1, 28, 28).astype("float32")
    report = gr.profile(data=data)
    assert len(report.calls) > 0
def test_report_serialization():
    """A report round-tripped through JSON must render the same table."""
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, "llvm", params=params)
    vm = profiler_vm.VirtualMachineProfiler(exe, tvm.cpu())
    data = np.random.rand(1, 1, 28, 28).astype("float32")
    report = vm.profile(data, func_name="main")

    report2 = Report.from_json(report.json())
    # aggregate/col_sums off: ordering of equal-duration rows is unstable,
    # so compare the raw tables only.
    assert report.table(aggregate=False, col_sums=False) == report2.table(
        aggregate=False, col_sums=False
    )
@T.prim_func
def axpy_cpu(a: T.handle, b: T.handle, c: T.handle) -> None:
    # Element-wise C = A + B over 10 float64 elements (serial CPU loop).
    A = T.match_buffer(a, [10], "float64")
    B = T.match_buffer(b, [10], "float64")
    C = T.match_buffer(c, [10], "float64")
    for i in range(10):
        C[i] = A[i] + B[i]
@T.prim_func
def axpy_gpu(a: T.handle, b: T.handle, c: T.handle) -> None:
    # Element-wise C = A + B over 10 float64 elements, one GPU thread each.
    A = T.match_buffer(a, [10], "float64")
    B = T.match_buffer(b, [10], "float64")
    C = T.match_buffer(c, [10], "float64")
    for i in T.thread_binding(0, 10, "threadIdx.x"):
        C[i] = A[i] + B[i]
@tvm.testing.parametrize_targets("cuda", "llvm")
@pytest.mark.skipif(
    tvm.get_global_func("runtime.profiling.PAPIMetricCollector", allow_missing=True) is None,
    reason="PAPI profiling not enabled",
)
def test_profile_function(target, dev):
    """Profile a single built PrimFunc with a PAPI metric collector."""
    target = tvm.target.Target(target)
    if str(target.kind) == "llvm":
        metric = "PAPI_FP_OPS"
        func = axpy_cpu
    elif str(target.kind) == "cuda":
        metric = (
            "cuda:::gpu__compute_memory_access_throughput.max.pct_of_peak_sustained_region:device=0"
        )
        func = axpy_gpu
    else:
        pytest.skip(f"Target {target.kind} not supported by this test")
    f = tvm.build(func, target=target)
    a = tvm.nd.array(np.ones(10), device=dev)
    b = tvm.nd.array(np.ones(10), device=dev)
    c = tvm.nd.array(np.zeros(10), device=dev)
    report = tvm.runtime.profiling.profile_function(
        f, dev, [tvm.runtime.profiling.PAPIMetricCollector({dev: [metric]})]
    )(a, b, c)
    assert metric in report.keys()
    assert report[metric].value > 0
if __name__ == "__main__":
    tvm.testing.main()
# Imports for the RPC tests (stdlib / third-party / tvm groups).
import multiprocessing
import os
import stat
import sys
import time

import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import rpc, te
from tvm.contrib import cc, utils
from tvm.relay.backend import Runtime
from tvm.rpc.proxy import Proxy
from tvm.rpc.tracker import Tracker
if __name__ == "__main__":
    tvm.testing.main()

# Skip the whole module when multiprocessing uses "spawn" on non-Windows:
# functions registered with tvm.register_func are not inherited by spawned
# rpc.Server processes.
pytestmark = pytest.mark.skipif(
    # FIX(review): "== False" comparison replaced with idiomatic "not".
    not sys.platform.startswith("win") and multiprocessing.get_start_method() != "fork",
    reason=(
        "pytest + multiprocessing spawn method causes tvm.register_func to "
        "not work on the rpc.Server."
    ),
)
@tvm.testing.requires_rpc
def test_bigendian_rpc():
    """Test big endian rpc when there is a PowerPC RPC server available."""
    host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
    # BUGFIX(review): os.environ values are strings; coerce so a configured
    # port is passed to rpc.connect as an int, same as the default.
    port = int(os.environ.get("TVM_POWERPC_TEST_PORT", 9090))
    if host is None:
        return

    def verify_rpc(remote, target, shape, dtype):
        A = te.placeholder(shape, dtype=dtype)
        B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype))
        s = te.create_schedule(B.op)
        f = tvm.build(s, [A, B], target, name="myadd")

        dev = remote.cpu(0)
        a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), device=dev)
        b = tvm.nd.array(np.zeros(shape).astype(A.dtype), device=dev)
        temp = utils.tempdir()
        path_dso = temp.relpath("dev_lib.o")
        f.save(path_dso)
        remote.upload(path_dso)
        f = remote.load_module("dev_lib.o")
        f(a, b)
        tvm.testing.assert_allclose(a.numpy() + 1, b.numpy())

    print("Test RPC connection to PowerPC...")
    remote = rpc.connect(host, port)
    target = "llvm -mtriple=powerpc-linux-gnu"
    for dtype in ["float32", "float64", "int32", "int8"]:
        verify_rpc(remote, target, (10,), dtype)
@tvm.testing.requires_rpc
def test_rpc_simple():
    """Call registered test functions through a local RPC server."""
    server = rpc.Server(key="x1")
    client = rpc.connect("127.0.0.1", server.port, key="x1")

    def check_remote():
        f1 = client.get_function("rpc.test.addone")
        assert f1(10) == 11
        f3 = client.get_function("rpc.test.except")
        # the remote-side exception must surface as TVMError locally
        with pytest.raises(tvm._ffi.base.TVMError):
            f3("abc")
        f2 = client.get_function("rpc.test.strcat")
        assert f2("abc", 11) == "abc:11"

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_simple_wlog():
    """Same as test_rpc_simple, but with RPC logging enabled."""
    server = rpc.Server(key="x1")
    client = rpc.connect("127.0.0.1", server.port, key="x1", enable_logging=True)

    def check_remote():
        f1 = client.get_function("rpc.test.addone")
        assert f1(10) == 11
        f3 = client.get_function("rpc.test.except")
        with pytest.raises(tvm._ffi.base.TVMError):
            f3("abc")
        f2 = client.get_function("rpc.test.strcat")
        assert f2("abc", 11) == "abc:11"

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_runtime_string():
    """Pass tvm.runtime String objects across the RPC boundary."""
    server = rpc.Server(key="x1")
    client = rpc.connect("127.0.0.1", server.port, key="x1")

    def check_remote():
        func = client.get_function("rpc.test.runtime_str_concat")
        x = tvm.runtime.container.String("abc")
        y = tvm.runtime.container.String("def")
        assert str(func(x, y)) == "abcdef"

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_array():
    """Allocate an NDArray on the remote and pass it to a remote function."""
    server = rpc.Server()
    remote = rpc.connect("127.0.0.1", server.port)

    def check_remote():
        x = np.ones((3, 4))
        r_cpu = tvm.nd.array(x, remote.cpu(0))
        assert str(r_cpu.device).startswith("remote")
        np.testing.assert_equal(r_cpu.numpy(), x)
        fremote = remote.get_function("rpc.test.remote_array_func")
        fremote(r_cpu)

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_large_array():
    """Round-trip arrays large enough to exercise chunked RPC transfers."""
    server = rpc.Server()
    remote = rpc.connect("127.0.0.1", server.port)

    def check_remote():
        dev = remote.cpu(0)
        a_np = np.ones((5041, 720)).astype("float32")
        b_np = np.ones((720, 192)).astype("float32")
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        np.testing.assert_equal(a.numpy(), a_np)
        np.testing.assert_equal(b.numpy(), b_np)

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_echo():
    """Echo values through local, socket and (when available) minrpc sessions."""

    def check(remote):
        fecho = remote.get_function("testing.echo")
        assert fecho(1, 2, 3) == 1
        assert fecho(100, 2, 3) == 100
        assert fecho("xyz") == "xyz"
        assert bytes(fecho(bytearray(b"123"))) == b"123"

        with pytest.raises(RuntimeError):
            raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError")
            raise_err()

        remote.cpu().sync()
        with pytest.raises(AttributeError):
            f3 = remote.system_lib()["notexist"]

    temp = rpc.server._server_env([])
    server = rpc.Server()
    client = rpc.connect("127.0.0.1", server.port)
    check(rpc.LocalSession())
    check(client)

    def check_minrpc():
        if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
            return
        # Direct pipe session against a freshly built minrpc executable.
        temp = utils.tempdir()
        minrpc_exec = temp.relpath("minrpc")
        tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
        check(rpc.PopenSession(minrpc_exec))
        # minrpc executable shipped to the remote as session constructor arg.
        server = rpc.Server()
        client = rpc.connect(
            "127.0.0.1",
            server.port,
            session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
        )
        check(client)

    check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_file_exchange():
    """Upload a blob to the server and download it back unchanged."""
    server = rpc.Server()
    remote = rpc.connect("127.0.0.1", server.port)

    def check_remote():
        blob = bytearray(np.random.randint(0, 10, size=(10)))
        remote.upload(blob, "dat.bin")
        rev = remote.download("dat.bin")
        assert rev == blob

    check_remote()
@tvm.testing.requires_rpc
@tvm.testing.requires_llvm
def test_rpc_remote_module():
    """Build locally, then upload, load and run modules over single-hop and
    multi-hop RPC sessions, plus the minrpc pipe session."""
    n = tvm.runtime.convert(102)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)

    server0 = rpc.Server(key="x0")
    server1 = rpc.Server(key="x1")
    # Connect to server0 and ask it to forward the session to server1
    # (multi-hop RPC).
    client = rpc.connect(
        "127.0.0.1",
        server0.port,
        key="x0",
        session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1", False],
    )

    def check_remote(remote):
        temp = utils.tempdir()
        dev = remote.cpu(0)
        f = tvm.build(s, [A, B], "llvm", name="myadd")
        path_dso = temp.relpath("dev_lib.so")
        f.export_library(path_dso)
        remote.upload(path_dso)
        f1 = remote.load_module("dev_lib.so")
        a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
        time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
        cost = time_f(a, b).mean
        print("%g secs/op" % cost)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

        # Download the server-linked module and run it locally.
        path_tar = temp.relpath("dev_lib.tar")
        f.export_library(path_tar)
        remote.upload(path_tar)
        local_download_path = temp.relpath("dev_lib.download.so")
        with open(local_download_path, "wb") as fo:
            fo.write(remote.download_linked_module("dev_lib.tar"))
        fupdated = tvm.runtime.load_module(local_download_path)
        a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0))
        b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0))
        fupdated(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

    def check_minrpc():
        if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
            return
        # Export as a self-contained minrpc executable (system-lib runtime).
        temp = utils.tempdir()
        runtime = Runtime("cpp", {"system-lib": True})
        f = tvm.build(s, [A, B], "llvm", name="myadd", runtime=runtime)
        path_minrpc = temp.relpath("dev_lib.minrpc")
        f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))

        with pytest.raises(RuntimeError):
            rpc.PopenSession("filenotexist")

        # Start the minrpc session over a pipe.
        remote = tvm.rpc.PopenSession(path_minrpc)
        dev = remote.cpu(0)
        f1 = remote.system_lib()
        a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
        time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1)
        cost = time_f(a, b).mean
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

        # A non-executable file must make PopenSession fail.
        os.chmod(path_minrpc, stat.S_IRUSR)
        with pytest.raises(RuntimeError):
            rpc.PopenSession(path_minrpc)

    def check_remote_link_cl(remote):
        """Test function to run remote code such as cl

        This is not enabled because there is forking issue
        of TVM runtime when server launches after OpenCL
        runtime initializes. We leave it as an example
        on how to do rpc when we want to do linking on remote.
        """
        if not tvm.testing.device_enabled("opencl"):
            print("Skip because opencl is not enabled")
            return
        temp = utils.tempdir()
        dev = remote.cl(0)
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], factor=32)
        s[B].bind(xo, te.thread_axis("blockIdx.x"))
        s[B].bind(xi, te.thread_axis("threadIdx.x"))
        f = tvm.build(s, [A, B], "opencl --host=llvm", name="myadd")
        # Upload host object, device source and meta file separately, then
        # link by importing the device module on the remote.
        path_o = temp.relpath("myadd.o")
        path_cl = temp.relpath("myadd.cl")
        path_json = temp.relpath("myadd.tvm_meta.json")
        f.save(path_o)
        f.imported_modules[0].save(path_cl)
        remote.upload(path_o)
        remote.upload(path_cl)
        remote.upload(path_json)
        fhost = remote.load_module("myadd.o")
        fdev = remote.load_module("myadd.cl")
        fhost.import_module(fdev)
        a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
        fhost(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        # Same thing packed as one tar archive.
        path_tar = temp.relpath("myadd.tar")
        f.export_library(path_tar)
        remote.upload(path_tar)
        fhost = remote.load_module("myadd.tar")
        a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
        fhost(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)

    check_remote(rpc.LocalSession())
    check_remote(client)
    check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_return_func():
    """A remote function may itself return a callable function."""
    server = rpc.Server(key="x1")
    client = rpc.connect("127.0.0.1", server.port, key="x1")

    def check_remote():
        f1 = client.get_function("rpc.test.add_to_lhs")
        fadd = f1(10)
        assert fadd(12) == 22

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_session_constructor_args():
    """Multi-hop sessions built via session_constructor_args, and the error
    path for an unknown constructor."""
    # start server
    server0 = rpc.Server(key="x0")
    server1 = rpc.Server(key="x1")

    def check_multi_hop():
        # Hop through server0 into server1.
        client = rpc.connect(
            "127.0.0.1",
            server0.port,
            key="x0",
            session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1", False],
        )
        fecho = client.get_function("testing.echo")
        assert fecho(1, 2, 3) == 1
        assert fecho(100, 2, 3) == 100
        assert fecho("xyz") == "xyz"
        assert bytes(fecho(bytearray(b"123"))) == b"123"

        nd = tvm.nd.array([1, 2, 3], device=client.cpu(0))
        assert nd.numpy()[1] == 2

    def check_error_handling():
        with pytest.raises(tvm.error.RPCError):
            client = rpc.connect(
                "127.0.0.1",
                server0.port,
                key="x0",
                session_constructor_args=["rpc.NonExistingConstructor"],
            )

    check_multi_hop()
    check_error_handling()
@tvm.testing.requires_rpc
def test_rpc_return_ndarray():
    """NDArrays returned from remote functions stay usable on the client."""
    server = rpc.Server(key="x1")
    client = rpc.connect("127.0.0.1", server.port, key="x1")

    m = client.get_function("rpc.test.remote_return_nd")
    get_arr = m("get_arr")
    ref_count = m("ref_count")
    get_elem = m("get_elem")
    get_arr_elem = m("get_arr_elem")

    def run_arr_test():
        arr = get_arr()
        assert get_elem(0) == 0.0
        assert get_arr_elem(arr, 0) == 0.0

    run_arr_test()
@tvm.testing.requires_rpc
def test_local_func():
    """LocalSession mirrors the remote API: functions and file exchange."""
    client = rpc.LocalSession()

    def check_remote():
        f1 = client.get_function("rpc.test.add_to_lhs")
        fadd = f1(10)
        assert fadd(12) == 22

        blob = bytearray(np.random.randint(0, 10, size=(10)))
        client.upload(blob, "dat.bin")
        rev = client.download("dat.bin")
        assert rev == blob

    check_remote()
@tvm.testing.requires_rpc
def test_rpc_tracker_register():
    """Servers register with the tracker; the summary reflects requests,
    releases and terminations."""
    # test registration
    tracker = Tracker(port=9000, port_end=10000)
    device_key = "test_device"
    server1 = rpc.Server(
        host="127.0.0.1",
        port=9000,
        port_end=10000,
        key=device_key,
        tracker_addr=("127.0.0.1", tracker.port),
    )
    server2 = rpc.Server(
        host="127.0.0.1",
        port=9000,
        port_end=10000,
        key=device_key,
        tracker_addr=("127.0.0.1", tracker.port),
        custom_addr="test_addr",  # this server advertises a custom address
    )
    time.sleep(1)
    client = rpc.connect_tracker("127.0.0.1", tracker.port)

    def exist_address(summary, key, host, port):
        # True iff the tracker summary lists a server under `key` at host:port.
        server_info = summary["server_info"]
        for device in server_info:
            if device["key"] == "server:%s" % key:
                addr = device["addr"]
                if (host is None or host == addr[0]) and port == addr[1]:
                    return True
        return False

    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 2
    assert exist_address(summary, device_key, "127.0.0.1", server1.port)
    assert exist_address(summary, device_key, "test_addr", server2.port)

    # Requesting a session takes one server out of the free queue...
    remote = client.request(device_key)
    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 1
    # ...and dropping the session returns it.
    del remote
    time.sleep(1)
    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 2

    server1.terminate()
    time.sleep(1)
    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 1
    assert not exist_address(summary, device_key, "127.0.0.1", server1.port)
    assert exist_address(summary, device_key, "test_addr", server2.port)

    server2.terminate()
    time.sleep(1)
    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 0
    assert not exist_address(summary, device_key, "test_addr", server2.port)

    tracker.terminate()
def _target(host, port, device_key, timeout):
    """Child-process helper: request a tracker session and hold it until the
    parent terminates this process."""
    client = rpc.connect_tracker(host, port)
    remote = client.request(device_key, session_timeout=timeout)
    while True:
        pass
    # unreachable by design — the parent kills the process while it spins
    remote.cpu()
@tvm.testing.requires_rpc
def test_rpc_tracker_request():
    """Tracker queueing: one request occupies the device, the second pends
    until the first holder is killed."""
    # test concurrent request
    tracker = Tracker(port=9000, port_end=10000)
    device_key = "test_device"
    server = rpc.Server(
        port=9000,
        port_end=10000,
        key=device_key,
        tracker_addr=("127.0.0.1", tracker.port),
    )
    client = rpc.connect_tracker("127.0.0.1", tracker.port)
    proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4))
    proc2 = multiprocessing.Process(
        target=_target, args=("127.0.0.1", tracker.port, device_key, 200)
    )
    proc1.start()
    time.sleep(0.5)
    proc2.start()
    time.sleep(0.5)

    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 0
    assert summary["queue_info"][device_key]["pending"] == 1

    proc1.terminate()
    proc1.join()
    time.sleep(0.5)

    # proc2's pending request is granted once proc1 dies.
    summary = client.summary()
    assert summary["queue_info"][device_key]["free"] == 0
    assert summary["queue_info"][device_key]["pending"] == 0

    proc2.terminate()
    proc2.join()
    server.terminate()
    tracker.terminate()
@tvm.testing.requires_rpc
def test_rpc_tracker_via_proxy():
    """
         tracker
         /     \\
    Host -- Proxy -- RPC server
    """
    device_key = "test_device"

    tracker_server = Tracker(port=9000, port_end=9100)
    proxy_server = Proxy(
        host=tracker_server.host,
        port=8888,
        port_end=8988,
        tracker_addr=(tracker_server.host, tracker_server.port),
    )

    # Both servers attach through the proxy (is_proxy=True).
    server1 = rpc.Server(
        host=proxy_server.host,
        port=proxy_server.port,
        key=device_key,
        tracker_addr=(tracker_server.host, tracker_server.port),
        is_proxy=True,
    )
    server2 = rpc.Server(
        host=proxy_server.host,
        port=proxy_server.port,
        key=device_key,
        tracker_addr=(tracker_server.host, tracker_server.port),
        is_proxy=True,
    )

    client = rpc.connect_tracker(tracker_server.host, tracker_server.port)
    remote1 = client.request(device_key, session_timeout=30)  # pylint: disable=unused-variable
    remote2 = client.request(device_key, session_timeout=30)  # pylint: disable=unused-variable

    server2.terminate()
    server1.terminate()
    proxy_server.terminate()
    tracker_server.terminate()
# Imports for the trace tests.
import numpy as np

import tvm
from tvm import te
def test_trace_default_action():
    """tvm.tir.trace with no callback uses the default print action and must
    still compile and run."""
    n = 2
    x = te.placeholder((n, n, n), name="X", dtype="float32")
    y = te.compute(x.shape, lambda i, j, k: tvm.tir.trace([i, j, k, x[i][j][k]]))
    s = te.create_schedule(y.op)
    f = tvm.build(s, [x, y], target="llvm")
    xnd = tvm.nd.array(np.ones((n, n, n), dtype=x.dtype))
    ynd = tvm.nd.array(np.zeros((n, n, n), dtype=y.dtype))
    f(xnd, ynd)
def test_trace_expr_assign():
    """A silent trace callback must pass the traced value through unchanged."""

    @tvm.register_func("tvm.tir.trace_callback2")
    def trace_buffer(x):
        return

    def check_assign(dtype):
        n = 4
        x = te.placeholder((n, n, n), name="X", dtype=dtype)
        y = te.compute(
            x.shape, lambda i, j, k: tvm.tir.trace([x[i][j][k]], "tvm.tir.trace_callback2")
        )
        z = te.compute(
            x.shape, lambda i, j, k: tvm.tir.trace([y[i][j][k]], "tvm.tir.trace_callback2")
        )
        s = te.create_schedule(z.op)
        f = tvm.build(s, [x, y, z], "llvm")

        xnd = tvm.nd.array(np.ones((n, n, n), dtype=x.dtype))
        ynd = tvm.nd.array(np.zeros((n, n, n), dtype=y.dtype))
        znd = tvm.nd.array(np.zeros((n, n, n), dtype=z.dtype))
        f(xnd, ynd, znd)

        # trace returns its argument, so the values propagate x -> y -> z
        assert np.array_equal(xnd.numpy(), np.ones((n, n, n)))
        assert np.array_equal(ynd.numpy(), np.ones((n, n, n)))
        assert np.array_equal(znd.numpy(), np.ones((n, n, n)))

    for t in ["float64", "float32", "int64", "int32"]:
        check_assign(t)
def test_trace_expr_sum_generated():
    """Traced operands inside an arithmetic expression keep their values."""

    @tvm.register_func("tvm.tir.trace_callback3")
    def trace_buffer(x):
        return

    def check_expr_sum(dtype):
        n = 4
        a = te.placeholder((n, n, n), name="a", dtype=dtype)
        b = te.placeholder((n, n, n), name="b", dtype=dtype)
        c = te.compute(
            a.shape,
            lambda i, j, k: tvm.tir.trace([a[i][j][k]], "tvm.tir.trace_callback3")
            + tvm.tir.trace([b[i][j][k]], "tvm.tir.trace_callback3"),
        )
        s = te.create_schedule(c.op)
        f = tvm.build(s, [a, b, c])
        xnd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=a.dtype)))
        ynd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=b.dtype)))
        znd = tvm.nd.array(np.zeros((n, n, n), dtype=c.dtype))
        f(xnd, ynd, znd)
        assert np.array_equal(znd.numpy(), xnd.numpy() + ynd.numpy())

    for t in ["float64", "float32", "int64", "int32"]:
        check_expr_sum(t)
def test_trace_expr_sum_args():
    """Trace calls with extra index arguments must not disturb the sum."""

    @tvm.register_func("tvm.tir.trace_silent")
    def silent(*args):
        return

    def check_expr_sum(dtype):
        n = 4
        a = te.placeholder((n, n, n), name="a", dtype=dtype)
        b = te.placeholder((n, n, n), name="b", dtype=dtype)
        e = te.placeholder((n, n, n), name="e", dtype=dtype)
        d = te.placeholder((n, n, n), name="d", dtype=dtype)
        c = te.compute(
            a.shape,
            lambda i, j, k: tvm.tir.trace([i, j, k, a[i][j][k]], "tvm.tir.trace_silent")
            + tvm.tir.trace([i, j, k, b[i][j][k]], "tvm.tir.trace_silent")
            + tvm.tir.trace([i, j, k, d[i][j][k]], "tvm.tir.trace_silent")
            + tvm.tir.trace([i, j, k, e[i][j][k]], "tvm.tir.trace_silent"),
        )
        s = te.create_schedule(c.op)
        f = tvm.build(s, [a, b, d, e, c])
        a_nd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=a.dtype)))
        b_nd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=b.dtype)))
        d_nd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=d.dtype)))
        e_nd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=e.dtype)))
        c_nd = tvm.nd.array(np.zeros((n, n, n), dtype=c.dtype))
        f(a_nd, b_nd, d_nd, e_nd, c_nd)
        assert np.array_equal(
            c_nd.numpy(), a_nd.numpy() + b_nd.numpy() + d_nd.numpy() + e_nd.numpy()
        )

    for t in ["float64", "float32", "int64", "int32"]:
        check_expr_sum(t)
def test_trace_expr_sum_custom():
    """Same as the generated-sum test but with explicit 2-D input data."""

    @tvm.register_func("tvm.tir.trace_callback4")
    def trace_buffer(x):
        return

    def check_expr_sum_custom(dtype):
        n = 4
        a = te.placeholder((n, n), name="a", dtype=dtype)
        b = te.placeholder((n, n), name="b", dtype=dtype)
        c = te.compute(
            a.shape,
            lambda i, j: tvm.tir.trace([a[i][j]], "tvm.tir.trace_callback4")
            + tvm.tir.trace([b[i][j]], "tvm.tir.trace_callback4"),
        )
        s = te.create_schedule(c.op)
        f = tvm.build(s, [a, b, c])
        npa = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=a.dtype)
        npb = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=a.dtype)
        xnd = tvm.nd.array(npa)
        ynd = tvm.nd.array(npb)
        znd = tvm.nd.array(np.zeros((n, n), dtype=c.dtype))
        f(xnd, ynd, znd)
        assert np.array_equal(znd.numpy(), npa + npb)

    for t in ["float64", "float32", "int64", "int32"]:
        check_expr_sum_custom(t)
def test_trace_can_change_traced_value_int():
    """A trace callback's int return value replaces the traced value."""

    @tvm.register_func("tvm.tir.trace_change_int_first")
    def trace_buffer(x):
        return 13

    @tvm.register_func("tvm.tir.trace_change_int_second")
    def trace_buffer(x):
        return 14

    def check_assign(dtype):
        n = 4
        x = te.placeholder((n,), name="X", dtype=dtype)
        y = te.compute(x.shape, lambda i: tvm.tir.trace([x[i]], "tvm.tir.trace_change_int_first"))
        z = te.compute(x.shape, lambda i: tvm.tir.trace([y[i]], "tvm.tir.trace_change_int_second"))
        s = te.create_schedule(z.op)
        f = tvm.build(s, [x, y, z], "llvm")

        xnd = tvm.nd.array(np.ones((n,), dtype=x.dtype))
        ynd = tvm.nd.array(np.zeros((n,), dtype=y.dtype))
        znd = tvm.nd.array(np.zeros((n,), dtype=z.dtype))
        f(xnd, ynd, znd)
        # each stage is overwritten by its callback's return value
        check_array_first = np.array([13, 13, 13, 13])
        check_array_second = np.array([14, 14, 14, 14])
        assert np.array_equal(ynd.numpy(), check_array_first)
        assert np.array_equal(znd.numpy(), check_array_second)

    for t in ["int64", "int32"]:
        check_assign(t)
def test_trace_can_change_traced_value_float():
    """A trace callback's return value must replace the traced float.

    Fix: the registration string "tvm.tir.trace_change_float_first" was
    split across two physical lines (an invalid string literal); rejoined.
    """

    @tvm.register_func("tvm.tir.trace_change_float_first")
    def trace_buffer(x):
        return 13.0

    @tvm.register_func("tvm.tir.trace_change_float_second")
    def trace_buffer(x):
        return 14.0

    def check_assign(dtype):
        n = 4
        x = te.placeholder((n,), name="X", dtype=dtype)
        y = te.compute(x.shape, lambda i: tvm.tir.trace([x[i]], "tvm.tir.trace_change_float_first"))
        z = te.compute(
            x.shape, lambda i: tvm.tir.trace([y[i]], "tvm.tir.trace_change_float_second")
        )
        s = te.create_schedule(z.op)
        f = tvm.build(s, [x, y, z], "llvm")
        xnd = tvm.nd.array(np.ones((n,), dtype=x.dtype))
        ynd = tvm.nd.array(np.zeros((n,), dtype=y.dtype))
        znd = tvm.nd.array(np.zeros((n,), dtype=z.dtype))
        f(xnd, ynd, znd)
        # First callback rewrites y to 13.0 everywhere, second z to 14.0.
        check_array_first = np.array([13.0, 13.0, 13.0, 13.0])
        check_array_second = np.array([14.0, 14.0, 14.0, 14.0])
        assert np.array_equal(ynd.numpy(), check_array_first)
        assert np.array_equal(znd.numpy(), check_array_second)

    for t in ["float64", "float32"]:
        check_assign(t)
if __name__ == "__main__":
    # Run every trace-related test when executed directly as a script.
    test_trace_expr_assign()
    test_trace_expr_sum_generated()
    test_trace_expr_sum_custom()
    test_trace_expr_sum_args()
    test_trace_default_action()
    test_trace_can_change_traced_value_int()
    test_trace_can_change_traced_value_float()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm.runtime import profiler_vm
from tvm import relay
from tvm.relay.testing import mlp
@tvm.testing.parametrize_targets
def test_basic(dev, target):
    """Profile one MLP inference through a save/load round-trip of the VM.

    Improvement: the `profiler_vm.enabled()` guard now runs before the
    workload is built, so disabled configurations skip without paying for
    `mlp.get_workload`.
    """
    if not profiler_vm.enabled():
        return
    mod, params = mlp.get_workload(batch_size=1)
    exe = relay.vm.compile(mod, target, params=params)
    # Serialize and reload to make sure profiling works on a deserialized
    # executable, not only a freshly compiled one.
    code, lib = exe.save()
    des_exe = tvm.runtime.vm.Executable.load_exec(code, lib)
    vm = profiler_vm.VirtualMachineProfiler(des_exe, dev)
    data = np.random.rand(1, 1, 28, 28).astype("float32")
    res = vm.profile(tvm.nd.array(data), func_name="main")
    # The MLP ends in a softmax, so it must show up in the profile report.
    assert "softmax" in str(res)
def test_vm_reshape_and_copy():
    """Profile a reshape-only function and check it lowers to reshape_tensor.

    Fixes: the executable variable was named `exec`, shadowing the Python
    builtin; the device was `tvm.gpu()` although the module is compiled for
    the LLVM (CPU) target, so a CPU device is the matching choice; the
    `target` local was defined but never used.
    """
    target = "llvm"
    dev = tvm.cpu()
    x_np = np.random.uniform(size=(8, 16)).astype("float32")
    x = relay.var("x", shape=(8, 16), dtype="float32")
    y = relay.reshape(x, [-1, 4, 8])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    with tvm.transform.PassContext(opt_level=3):
        vm_exec = relay.vm.compile(mod, target)
    # Reshape of a compatible layout should compile to an in-place
    # reshape_tensor instruction rather than a data copy.
    assert "reshape_tensor" in vm_exec.bytecode
    vm = profiler_vm.VirtualMachineProfiler(vm_exec, dev)
    vm.profile(tvm.nd.array(x_np))
if __name__ == "__main__":
    # Delegate test discovery and execution to TVM's pytest wrapper.
    tvm.testing.main()
|
import tvm |
import tvm.testing
from tvm.script |
import tir as T |
import pytest
@pytest.mark.xfail(reason="Awaiting TVMScript support for 'call_tir' token.", strict=True)
class TestParseCallTIR(tvm.testing.CompareBeforeAfter):
    """
    Simply confirm that the TIR node `call_tir` doesn't interfere with
    the successful parsing of the TVMScript.
    """

    def before():
        T.call_tir(add_one)
        # Fixed typo: was `T.evalute(0)`. The strict xfail is still driven
        # by the unsupported `call_tir` token above.
        T.evaluate(0)

    def expected():
        T.evaluate(0)

    # Identity pass: the comparison only exercises parsing, not lowering.
    transform = tvm.tir.transform.prim_func_pass(lambda func, _mod, _ctx: func, 0)
@pytest.mark.xfail(
    reason="Awaiting TVMScript support for 'call_tir' and T.annotation(\"extract_as_subroutine\").",
    strict=True,
)
class TestAnnotateAndSliceTIR(tvm.testing.CompareBeforeAfter):
    # Placeholder: the strict xfail documents the TVMScript features this
    # test will exercise once they land; no body exists yet.
    pass
@pytest.mark.xfail(
    reason="Awaiting TVMScript support for lowering of 'T.call_tir' to 'T.call_packed'.",
    strict=True,
)
class TestLowerCallTir(tvm.testing.CompareBeforeAfter):
    # Placeholder: will test the call_tir -> call_packed lowering once
    # TVMScript supports it; strict xfail tracks the missing feature.
    pass
@pytest.mark.xfail(reason="Awaiting end-to-end support for Primfunc slicing.", strict=True)
class TestPrimfuncSlicingEndToEnd(tvm.testing.CompareBeforeAfter):
    # Placeholder for the eventual end-to-end PrimFunc-slicing test.
    pass
import tvm |
import tvm.testing |
import numpy as np
from tvm.script |
import tir as T
# Sum-reduction over the last axis: B[i, j, k] = sum_l A[i, j, k, l].
# d1/d2/d3 are symbolic dimensions, specialized by the test below.
# (Comments only: a docstring inside a prim_func would be parsed as TVMScript.)
@T.prim_func
def reduce(a: T.handle, b: T.handle, d1: T.int32, d2: T.int32, d3: T.int32) -> None:
    A = T.match_buffer(a, [1, d1, d2, d3])
    B = T.match_buffer(b, [1, d1, d2])
    for i, j, k, l in T.grid(1, d1, d2, d3):
        with T.block("reduce"):
            # "SSSR": the first three axes are spatial, the last is the reduction axis.
            vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l])
            with T.init():
                B[vi, vj, vk] = 0.0
            B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl]
# Max-reduction over the last axis: B[i, j, k] = max_l A[i, j, k, l].
# The init value is the most negative finite float32 (approximately -FLT_MAX).
@T.prim_func
def reduce_max(a: T.handle, b: T.handle, d1: T.int32, d2: T.int32, d3: T.int32) -> None:
    A = T.match_buffer(a, [1, d1, d2, d3])
    B = T.match_buffer(b, [1, d1, d2])
    for i, j, k, l in T.grid(1, d1, d2, d3):
        with T.block("reduce"):
            # "SSSR": spatial i/j/k, reduction l.
            vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l])
            with T.init():
                B[vi, vj, vk] = T.float32(-3.4028234663852886e38)
            B[vi, vj, vk] = T.max(B[vi, vj, vk], A[vi, vj, vk, vl])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_subwarp_reduction():
    """Bind the reduction axis to threadIdx.x and validate sub-warp reductions.

    Runs both the sum and max prim_funcs over all shapes 1 x d1 x d2 x d3
    with d3 (the reduction extent) between 2 and 32.
    """

    def _check(prim_fn, np_reduce, sign, d1, d2, d3):
        # Specialize the symbolic dims, bind loops to CUDA threads, build.
        _, _, p1, p2, p3 = prim_fn.params
        sch = tvm.tir.Schedule(prim_fn.specialize({p1: d1, p2: d2, p3: d3}))
        blk = sch.get_block("reduce")
        loops = sch.get_loops(blk)
        for loop, thread in zip(loops, ("blockIdx.x", "threadIdx.z", "threadIdx.y", "threadIdx.x")):
            sch.bind(loop, thread)
        func = tvm.build(sch.mod["main"], target="cuda")
        # `sign` is -1.0 for the max test so the data exercises negatives.
        a_np = (sign * np.random.rand(1, d1, d2, d3)).astype("float32")
        b_np = np_reduce(a_np, axis=-1).astype("float32")
        a = tvm.nd.array(a_np, tvm.cuda(0))
        b = tvm.nd.array(np.zeros_like(b_np), tvm.cuda(0))
        func(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-6, atol=1e-6)

    for d1 in range(1, 5):
        for d2 in range(1, 5):
            for d3 in range(2, 33):
                _check(reduce, np.sum, 1.0, d1, d2, d3)
                _check(reduce_max, np.max, -1.0, d1, d2, d3)
if __name__ == "__main__":
    # Requires a CUDA-capable GPU; see the decorators on the test above.
    test_cuda_subwarp_reduction()
import tvm
from tvm |
import te |
import re |
import os |
import ctypes
def test_popcount():
    """Check vectorized popcount lowers to NEON vcnt plus vpaddl widening adds.

    Fix: the helper's first parameter was named `type`, shadowing the Python
    builtin; renamed to `dtype`.
    """
    target = "llvm -mtriple=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon"

    def check_correct_assembly(dtype, elements, counts):
        n = tvm.runtime.convert(elements)
        A = te.placeholder(n, dtype=dtype, name="A")
        B = te.compute(A.shape, lambda i: tvm.tir.popcount(A[i]), name="B")
        s = te.create_schedule(B.op)
        s[B].vectorize(s[B].op.axis[0])
        f = tvm.build(s, [A, B], target)
        assembly = f.get_source("asm")
        # Expect `counts` widening pairwise adds and exactly one byte popcount.
        matches = re.findall("vpaddl", assembly)
        assert len(matches) == counts
        matches = re.findall("vcnt", assembly)
        assert len(matches) == 1

    check_correct_assembly("uint16", 8, 1)
    check_correct_assembly("uint16", 4, 1)
    check_correct_assembly("uint32", 4, 2)
    check_correct_assembly("uint32", 2, 2)
    check_correct_assembly("uint64", 2, 3)
def test_vmlal_s16():
    """Check int8 multiply-accumulate reductions emit vmlal.s16 on NEON."""
    target = "llvm -mtriple=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon"

    def check_correct_assembly(N):
        # Elementwise int8 x int8 -> int32 dot product over a symbolic K.
        K = te.size_var("K")
        A = te.placeholder((K, N), dtype="int8", name="A")
        B = te.placeholder((K, N), dtype="int8", name="B")
        k = te.reduce_axis((0, K))
        C = te.compute(
            (N,),
            lambda n: te.sum(A[k, n].astype("int32") * B[k, n].astype("int32"), axis=[k]),
            name="C",
        )
        sched = te.create_schedule(C.op)
        sched[C].vectorize(sched[C].op.axis[0])
        func = tvm.build(sched, [A, B, C], target)
        # One vmlal.s16 instruction expected per output lane.
        assert len(re.findall("vmlal.s16", func.get_source("asm"))) == N

    for width in (8, 16, 32, 64):
        check_correct_assembly(width)

    def check_broadcast_correct_assembly(N):
        # Same as above, but B is a vector broadcast along the N axis.
        K = te.size_var("K")
        A = te.placeholder((K, N), dtype="int8", name="A")
        B = te.placeholder((K,), dtype="int8", name="B")
        k = te.reduce_axis((0, K))
        C = te.compute(
            (N,),
            lambda n: te.sum(A[k, n].astype("int32") * B[k].astype("int32"), axis=[k]),
            name="C",
        )
        sched = te.create_schedule(C.op)
        sched[C].vectorize(sched[C].op.axis[0])
        func = tvm.build(sched, [A, B, C], target)
        assert len(re.findall("vmlal.s16", func.get_source("asm"))) == N

    for width in (8, 16, 32, 64):
        check_broadcast_correct_assembly(width)
if __name__ == "__main__":
    # Both checks only inspect generated ARM assembly; no ARM hardware needed.
    test_popcount()
    test_vmlal_s16()
import numpy as np
from tvm |
import relay
from tvm.relay |
import testing
from tvm.contrib |
import graph_executor |
import tvm
from tvm |
import te |
import ctypes |
import tvm.testing
@tvm.testing.uses_gpu
def test_synthetic():
    """Export a CUDA-built synthetic workload, reload it, and compare to CPU."""
    # Both backends are required; bail out early if either is missing.
    for device in ["llvm", "cuda"]:
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    input_shape = (1, 5, 23, 61)

    def verify(data):
        # Reference run of the same workload on the CPU backend.
        mod, params = relay.testing.synthetic.get_workload(input_shape=input_shape)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, "llvm", params=params)
        cpu_module = graph_executor.GraphModule(lib["default"](tvm.cpu()))
        cpu_module.set_input("data", data)
        cpu_module.run()
        return cpu_module.get_output(0).numpy()

    synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload(input_shape=input_shape)
    with tvm.transform.PassContext(opt_level=3):
        synthetic_gpu_lib = relay.build_module.build(synthetic_mod, "cuda", params=synthetic_params)

    from tvm.contrib import utils

    # Round-trip through a shared library on disk before running on the GPU.
    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    synthetic_gpu_lib.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)
    data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
    gpu_module = graph_executor.GraphModule(loaded_lib["default"](tvm.cuda()))
    gpu_module.set_input("data", data)
    gpu_module.run()
    out = gpu_module.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.uses_gpu
def test_cuda_lib():
    """Export a CUDA kernel with an LLVM host, reload it, and run B = A + 1."""
    dev = tvm.cuda(0)
    # Both backends are required; skip if either is unavailable.
    for device in ["llvm", "cuda"]:
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    nn = 12
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    block_axis, thread_axis = s[B].split(B.op.axis[0], factor=4)
    s[B].bind(block_axis, te.thread_axis("blockIdx.x"))
    s[B].bind(thread_axis, te.thread_axis("threadIdx.x"))

    from tvm.contrib import utils

    # Build, export to a shared library, and load it back before running.
    temp = utils.tempdir()
    fn_add = tvm.build(s, [A, B], target="cuda --host=llvm", name="add")
    path_lib = temp.relpath("deploy_lib.so")
    fn_add.export_library(path_lib)
    loaded = tvm.runtime.load_module(path_lib)
    a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
    b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev)
    loaded["add"](a, b)
    np.testing.assert_equal(b.numpy(), a.numpy() + 1)
if __name__ == "__main__":
    # Both tests skip themselves when LLVM or CUDA support is unavailable.
    test_synthetic()
    test_cuda_lib()
"""codegen related to bool types""" |
import tvm |
import tvm.testing
from tvm |
import te |
import numpy as np |
import tvm.testing
arr_size = tvm.testing.parameter(32)


@tvm.testing.fixture
def compute(arr_size):
    """Build the compare chain: C = (A > B), D = float32(C and (A > 1))."""
    lhs = te.placeholder((arr_size,), name="A")
    rhs = te.placeholder((arr_size,), name="B")
    greater = te.compute(lhs.shape, lambda *i: lhs(*i) > rhs(*i), name="C")
    combined = te.compute(
        greater.shape, lambda *i: tvm.tir.all(greater(*i), lhs(*i) > 1).astype("float32"), name="D"
    )
    return [lhs, rhs, greater, combined]
@tvm.testing.fixture
def schedule(target, compute):
    """Schedule D: parallel split on CPU (llvm), thread binding elsewhere."""
    target = tvm.target.Target(target)
    A, B, C, D = compute
    sched = te.create_schedule(D.op)
    if target.kind.name == "llvm":
        outer, inner = sched[C].split(C.op.axis[0], factor=4)
        _, parallel_axis = sched[C].split(outer, factor=13)
        sched[C].parallel(parallel_axis)
    else:
        for stage in [C, D]:
            outer, inner = sched[stage].split(stage.op.axis[0], factor=4)
            sched[stage].bind(outer, te.thread_axis("blockIdx.x"))
            sched[stage].bind(inner, te.thread_axis("threadIdx.x"))
    return sched
@tvm.testing.uses_gpu
def test_cmp_load_store(target, dev, arr_size, compute, schedule):
    """End-to-end check that boolean compare results load and store correctly."""
    A, B, _, D = compute
    func = tvm.build(schedule, [A, B, D], target)
    lhs_np = np.random.uniform(size=arr_size).astype(A.dtype)
    rhs_np = np.random.uniform(size=arr_size).astype(B.dtype)
    lhs = tvm.nd.array(lhs_np, dev)
    rhs = tvm.nd.array(rhs_np, dev)
    out = tvm.nd.array(np.zeros(arr_size, dtype=D.dtype), dev)
    func(lhs, rhs, out)
    # D must equal float32((A > B) and (A > 1)) element-wise.
    expected = np.logical_and(lhs_np > rhs_np, lhs_np > 1).astype("float32")
    np.testing.assert_equal(out.numpy(), expected)
if __name__ == "__main__":
    # Delegate test discovery and execution to TVM's pytest wrapper.
    tvm.testing.main()
import tvm |
import tvm.testing
from tvm |
import te |
import numpy as np
from tvm.contrib |
import utils
def test_add():
    """Compile C = A + B with the C backend, export as a DSO, reload, verify."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
    s = te.create_schedule(C.op)

    def check_c():
        # Build with the C source backend, round-trip through a shared lib.
        mhost = tvm.build(s, [A, B, C], "c", name="test_fadd")
        temp = utils.tempdir()
        dso_path = temp.relpath("temp.so")
        mhost.export_library(dso_path)
        loaded = tvm.runtime.load_module(dso_path)
        fadd = loaded["test_fadd"]
        dev = tvm.cpu(0)
        a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=nn).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros(nn, dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())

    check_c()
def test_add_pipeline():
    """Exercise parallel/vectorize pragmas through the C codegen pipeline.

    Fix: removed dead locals `Ab` (a decl_buffer) and `binds` — they were
    constructed but never passed to `tvm.lower`, so they had no effect.
    """
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    AA = te.compute((n,), lambda *i: A(*i), name="A")
    BB = te.compute((n,), lambda *i: B(*i), name="B")
    T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T")
    C = te.compute(A.shape, lambda *i: T(*i), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    xo1, xo2 = s[C].split(xo, factor=13)
    s[C].parallel(xo2)
    # Pragmas drive the parallel runtime codepaths in the generated C.
    s[C].pragma(xo1, "parallel_launch_point")
    s[C].pragma(xo2, "parallel_stride_pattern")
    s[C].pragma(xo2, "parallel_barrier_when_finish")
    s[C].vectorize(xi)

    def check_c():
        f1 = tvm.lower(s, [A, B, C], name="test_fadd_pipeline")
        mhost = tvm.build(f1, target="c")
        temp = utils.tempdir()
        path_dso = temp.relpath("temp.so")
        mhost.export_library(path_dso)
        m = tvm.runtime.load_module(path_dso)
        fadd = m["test_fadd_pipeline"]
        dev = tvm.cpu(0)
        a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=nn).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros(nn, dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())

    check_c()
def test_reinterpret():
    """Reinterpret int32 bit patterns as float32 via tir.reinterpret."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A", dtype="int32")
    B = te.compute(
        A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.reinterpret", 2 + A(*i)), name="B"
    )
    s = te.create_schedule(B.op)

    def check_c():
        mhost = tvm.build(s, [A, B], "c", name="test_reinterpret")
        temp = utils.tempdir()
        dso_path = temp.relpath("temp.so")
        mhost.export_library(dso_path)
        loaded = tvm.runtime.load_module(dso_path)
        freinterpret = loaded["test_reinterpret"]
        dev = tvm.cpu(0)
        a = tvm.nd.array(np.random.randint(-(2**30), 2**30, size=nn).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
        freinterpret(a, b)
        # Same bits, different type: compare against a numpy view cast.
        tvm.testing.assert_allclose(b.numpy(), (2 + a.numpy()).view("float32"))

    check_c()
def test_ceil():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.ceil", A(*i)), name="B")
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_ceil")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fceil = m["test_ceil"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
fceil(a, b)
tvm.testing.assert_allclose(b.numpy(), (np.ceil(a.numpy()).view("floa |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.