import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing.layers import batch_norm_infer
from tvm.target.datatype import (
    create_lower_func,
    create_min_lower_func,
    lower_call_pure_extern,
    lower_ite,
    register,
    register_min_func,
    register_op,
)
from tvm.tir.op import call_pure_extern
def get_mobilenet():
dshape = (1, 3, 224, 224)
    from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
return relay.frontend.from_mxnet(block, shape_dict)
def get_cat_image(dimensions):
    from PIL import Image
    from tvm.contrib.download import download_testdata
url = "https:
dst = "cat.png"
real_dst = download_testdata(url, dst, module="data")
img = Image.open(real_dst).resize(dimensions)
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
return np.asarray(img, dtype="float32")
np.random.seed(0)
def convert_ndarray(dst_dtype, array):
"""Converts NDArray(s) into the specified datatype"""
x = relay.var("x", shape=array.shape, dtype=str(array.dtype))
cast = relay.Function([x], x.astype(dst_dtype))
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
return relay.create_executor("graph").evaluate(cast)(array)
def change_dtype(src, dst, module, params):
"""Convert constants and functions in module from src type to dst type.
Returns changed module and converted params of type dst_type.
"""
module = relay.frontend.ChangeDatatype(src, dst)(module)
module = relay.transform.InferType()(module)
params = {k: convert_ndarray(dst, v) for k, v in params.items()}
return module, params
def compare(module, input, src_dtype, dst_dtype, rtol, atol, params={}, target="llvm"):
module = relay.transform.InferType()(module)
module = relay.transform.SimplifyInference()(module)
correct = relay.create_executor("graph", mod=module).evaluate()(*input, **params)
module, converted_params = change_dtype(src_dtype, dst_dtype, module, params)
x_converted = [convert_ndarray(dst_dtype, arr) for arr in input]
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
maybe_correct = relay.create_executor("graph", mod=module, target=target).evaluate()(
*x_converted, **converted_params
)
maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct)
np.testing.assert_allclose(
maybe_correct_converted.numpy(), correct.numpy(), rtol=rtol, atol=atol
)
def setup_myfloat():
"""Set up tests for myfloat (a custom datatype |
that under the hood is float)
Currently, this registers some custom datatypes using the Bring Your
Own Datatypes framework.
"""
register("myfloat", 131)
register_op(
create_lower_func({(32, 32): "FloatToCustom32"}), "Cast", "llvm", "float", "myfloat"
)
register_op(
create_lower_func({(32, 32): "Custom32ToFloat"}), "Cast", "llvm", "myfloat", "float"
)
register_op(create_lower_func({32: "Custom32Add"}), "Add", "llvm", "myfloat")
register_op(
create_lower_func(
{
32: "Custom32Sub",
}
),
"Sub",
"llvm",
"myfloat",
)
register_op(create_lower_func({32: "Custom32Mul"}), "Mul", "llvm", "myfloat")
register_op(
create_lower_func(
{
32: "FloatToCustom32",
}
),
"FloatImm",
"llvm",
"myfloat",
)
register_op(
create_lower_func(
{
32: "Custom32Div",
}
),
"Div",
"llvm",
"myfloat",
)
register_op(create_lower_func({32: "Custom32Max"}), "Max", "llvm", "myfloat")
register_op(
create_lower_func({32: "Custom32Sqrt"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sqrt",
)
register_op(
create_lower_func({32: "Custom32Exp"}), "Call", "llvm", "myfloat", intrinsic_name="tir.exp"
)
register_op(
create_lower_func({32: "Custom32Log"}), "Call", "llvm", "myfloat", intrinsic_name="tir.log"
)
register_op(
create_lower_func({32: "Custom32Sigmoid"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sigmoid",
)
register_op(
create_lower_func({32: "Custom32Tanh"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.tanh",
)
register_op(lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else")
    register_op(
lower_call_pure_extern, "Call", "llvm", "myfloat", intrinsic_name="tir.call_pure_extern"
)
register_min_func(create_min_lower_func({32: "MinCustom32"}, "myfloat"), "myfloat")
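# Illustrative sketch (not part of the original tests): once setup_myfloat() has
# registered the type, the string "custom[myfloat]32" is accepted wherever Relay
# expects a dtype, e.g. in a cast. The helper name below is hypothetical.
def _demo_myfloat_cast():
    x = relay.var("x", shape=(3,), dtype="float32")
    return relay.cast(x, dtype="custom[myfloat]32")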
def setup_posites2():
"""Set up tests for posites2
Currently, this registers some custom datatypes using the Bring Your
Own Datatypes framework.
"""
register("posites2", 132)
register_op(
create_lower_func(
{
(32, 32): "FloatToPosit32es2",
(32, 16): "FloatToPosit16es2",
(32, 8): "FloatToPosit8es2",
}
),
"Cast",
"llvm",
"float",
"posites2",
)
register_op(
create_lower_func(
{
(32, 32): "Posit32es2ToFloat",
(16, 32): "Posit16es2ToFloat",
(8, 32): "Posit8es2ToFloat",
}
),
"Cast",
"llvm",
"posites2",
"float",
)
register_op(
create_lower_func({32: "Posit32es2Add", 16: "Posit16es2Add", 8: "Posit8es2Add"}),
"Add",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Sub", 16: "Posit16es2Sub", 8: "Posit8es2Sub"}),
"Sub",
"llvm",
"posites2",
)
register_op(
create_lower_func(
{32: "FloatToPosit32es2", 16: "FloatToPosit16es2", 8: "FloatToPosit8es2"}
),
"FloatImm",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Mul", 16: "Posit16es2Mul", 8: "Posit8es2Mul"}),
"Mul",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Div", 16: "Posit16es2Div", 8: "Posit8es2Div"}),
"Div",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Max", 16: "Posit16es2Max", 8: "Posit8es2Max"}),
"Max",
"llvm",
"posites2",
    )
register_op(
create_lower_func({32: "Posit32es2Sqrt", 16: "Posit16es2Sqrt", 8: "Posit8es2Sqrt"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.sqrt",
)
register_op(lower_ite, "Call", "llvm", "posites2", intrinsic_name="tir.if_then_else")
register_op(
lower_call_pure_extern, "Call", "llvm", "posites2", intrinsic_name="tir.call_pure_extern"
)
register_op(
create_lower_func({32: "Posit32es2Exp", 16: "Posit16es2Exp", 8: "Posit8es2Exp"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.exp",
)
register_op(
create_lower_func({32: "Posit32es2Log", 16: "Posit16es2Log", 8: "Posit8es2Log"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.log",
)
register_op(
create_lower_func(
{32: "Posit32es2Sigmoid", 16: "Posit16es2Sigmoid", 8: "Posit8es2Sigmoid"}
),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.sigmoid",
)
register_op(
create_lower_func({32: "Posit32es2Tanh", 16: "Posit16es2Tanh", 8: "Posit8es2Tanh"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.tanh",
)
register_min_func(
create_min_lower_func(
{32: "MinPosit32es2", 16: "MinPosit16es2", 8: "MinPosit8es2"}, "posites2"
),
"posites2",
)
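# Illustrative sketch (assumption, not in the original tests): a single BYODT
# registration covers every bit width listed in its lowering tables, so the same
# "posites2" name can be instantiated at 8, 16, or 32 bits from one setup call.
def _demo_posit_widths():
    return [
        relay.var(f"x{bits}", shape=(4,), dtype=f"custom[posites2]{bits}")
        for bits in (8, 16, 32)
    ]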
def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):
"""Run the same op, but with two different datatypes"""
shape1 = (5, 10, 5)
shape2 = (5,)
def check_unary_op(op, src_dtype, dst_dtype, shape):
t1 = relay.TensorType(shape, src_dtype)
x = relay.var("x", t1)
z = op(x)
x_data = np.random.rand(*shape).astype(t1.dtype)
module = tvm.IRModule.from_expr(relay.Function([x], z))
compare(module, (x_data,), src_dtype, dst_dtype, rtol, atol)
for op in [
relay.nn.softmax,
tvm.relay.log,
        tvm.relay.exp,
tvm.relay.sqrt,
tvm.relay.rsqrt,
tvm.relay.sigmoid,
tvm.relay.tanh,
relay.nn.relu,
relay.nn.batch_flatten,
]:
check_unary_op(op, src_dtype, dst_dtype, shape1)
for op in [relay.nn.max_pool2d, relay.nn.avg_pool2d, relay.nn.global_avg_pool2d]:
shape_2d = (3, 32, 32, 32)
check_unary_op(op, src_dtype, dst_dtype, shape_2d)
def check_binary_op(opfunc, src_dtype, dst_dtype):
t1 = relay.TensorType(shape1, src_dtype)
t2 = relay.TensorType(shape2, src_dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
z = opfunc(x, y)
x_data = np.random.rand(*shape1).astype(t1.dtype)
y_data = np.random.rand(*shape2).astype(t2.dtype)
module = tvm.IRModule.from_expr(relay.Function([x, y], z))
compare(module, (x_data, y_data), src_dtype, dst_dtype, rtol, atol)
for op in [
relay.add,
relay.subtract,
relay.divide,
relay.multiply,
]:
check_binary_op(op, src_dtype, dst_dtype)
def run_model(get_workload, input, src_dtype, dst_dtype, rtol=1e-4, atol=1e-4):
module, params = get_workload()
compare(module, input, src_dtype, dst_dtype, rtol, atol, params)
def run_conv2d(src_dtype, dst_dtype, rtol=1e-7, atol=1e-4):
def run_test_conv2d(
src_dtype,
dst_dtype,
scale,
dshape,
kshape,
padding=(1, 1),
groups=1,
dilation=(1, 1),
**attrs,
):
x = relay.var("x", shape=dshape, dtype=src_dtype)
w = relay.var("w", shape=kshape, dtype=src_dtype)
y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
module = tvm.IRModule.from_expr(relay.Function([x, w], y))
data = np.random.uniform(-scale, scale, size=dshape).astype(src_dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(src_dtype)
        compare(module, (data, kernel), src_dtype, dst_dtype, rtol, atol)
dshape = (1, 32, 18, 18)
kshape = (32, 1, 3, 3)
run_test_conv2d(
src_dtype,
dst_dtype,
1,
dshape,
kshape,
padding=(1, 1),
channels=32,
groups=32,
kernel_size=(3, 3),
)
dshape = (1, 32, 18, 18)
kshape = (32, 4, 3, 3)
run_test_conv2d(
src_dtype,
dst_dtype,
1,
dshape,
kshape,
padding=(1, 1),
channels=32,
groups=8,
kernel_size=(3, 3),
)
dshape = (1, 32, 18, 18)
kshape = (64, 1, 3, 3)
run_test_conv2d(
src_dtype,
dst_dtype,
1,
dshape,
kshape,
padding=(1, 1),
channels=64,
groups=32,
kernel_size=(3, 3),
)
dshape = (1, 3, 224, 224)
kshape = (10, 3, 3, 3)
run_test_conv2d(
src_dtype, dst_dtype, 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3)
)
dshape = (1, 3, 18, 18)
kshape = (10, 3, 3, 3)
run_test_conv2d(
src_dtype,
dst_dtype,
1,
dshape,
kshape,
padding=(1, 1),
channels=10,
kernel_size=(3, 3),
dilation=(3, 3),
)
def run_batchnorm(src_dtype, dst_dtype, rtol=1e-6, atol=1e-6):
shape = (3, 32, 32)
t = relay.TensorType(shape, src_dtype)
x = relay.var("x", t)
bn = batch_norm_infer(data=x, epsilon=2e-5, scale=False, name="bn_x")
f = relay.Function(relay.analysis.free_vars(bn), bn)
x_data = np.random.rand(*shape).astype(t.dtype)
module = tvm.IRModule.from_expr(f)
zero_data = np.zeros((32), "float32")
compare(
module,
(x_data, zero_data, zero_data, zero_data, zero_data),
src_dtype,
dst_dtype,
rtol,
atol,
)
def test_myfloat():
try:
setup_myfloat()
except tvm._ffi.base.TVMError as e:
if "float is already registered" not in str(e):
raise e
run_ops("float32" |
, "custom[myfloat]32", rtol=1e-6, atol=1e-6)
run_conv2d("float32", "custom[myfloat]32", rtol=1e-6, atol=1e-6)
run_batchnorm("float32", "custom[myfloat]32", rtol=1e-6, atol=1e-6)
def _has_posit():
return tvm.support.libinfo()["USE_BYODT_POSIT"] == "ON"
@pytest.mark.skipif(not _has_posit(), reason="compiled with USE_BYODT_POSIT flag OFF")
def test_posites2():
setup_posites2()
run_ops("float32", "custom[posites2]8", rtol=1, atol=1)
run_ops("float32", "custom[posites2]16", rtol=0.01, atol=1)
run_ops("float32", "custom[posites2]32", rtol=1e-6, atol=1e-6)
run_conv2d("float32", "custom[posites2]8", rtol=1, atol=1)
run_conv2d("float32", "custom[posites2]16", rtol=0.01, atol=1)
run_conv2d("float32", "custom[posites2]32")
run_batchnorm("float32", "custom[posites2]8", rtol=1, atol=1)
run_batchnorm("float32", "custom[posites2]16", rtol=0.01, atol=1)
run_batchnorm("float32", "custom[posites2]32", rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
    tvm.testing.main()
import tvm
from tvm import relay
import pytest
import numpy as np
@pytest.mark.parametrize("dtype, rtol", [("float16", 1e-3), ("float32", 1e-7), ("float64", 1e-12)])
def test_div_to_mul(dtype, rtol):
x = relay.var("x", relay.TensorType((), dtype))
y = relay.Constant(tvm.nd.array(np.array([1.5]).astype(dtype)))
z = x / y
mod = tvm.IRModule.from_expr(z)
transformed = relay.transform.DivToMul()(mod)
assert transformed["main"].body.op.name == "multiply"
np.testing.assert_allclose(transformed["main"].body.args[1].data.numpy()[0], 1 / 1.5, rtol=rtol)
import os
import shutil
import subprocess
import sys
import tempfile
def setup_git_repo(worktree=False):
git_repo_dir = tempfile.mkdtemp()
to_rm = [git_repo_dir]
try:
subprocess.check_output(["git", "init", "."], cwd=git_repo_dir)
with open(f"{git_repo_dir}/committed", "w") as committed_f:
committed_f.write("normal committed file\n")
subprocess.check_output(["git", "add", "committed"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/committed-ignored", "w") as gitignore_f:
gitignore_f.write("this file is gitignored, but committed already")
subprocess.check_output(["git", "add", "committed-ignored"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/.gitignore", "w") as gitignore_f:
gitignore_f.write("ignored\n" "committed-ignored\n")
subprocess.check_output(["git", "add", ".gitignore"], cwd=git_repo_dir)
subprocess.check_output(
[
"git",
"-c",
"user.name=Unit Test",
"-c",
"[email protected]",
"commit",
"-m",
"initial commit",
],
cwd=git_repo_dir,
)
if worktree:
worktree_dir = tempfile.mkdtemp()
to_rm.append(worktree_dir)
subprocess.check_output(["git", "worktree", "add", worktree_dir], cwd=git_repo_dir)
git_repo_dir = worktree_dir
with open(f"{git_repo_dir}/ignored", "w") as gitignore_f:
gitignore_f.write("this file is gitignored")
with open(f"{git_repo_dir}/added-to-index", "w") as added_f:
added_f.write("only added to git index\n")
subprocess.check_output(["git", "add", "added-to-index"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/ignored-added-to-index", "w") as ignored_f:
ignored_f.write("this file is gitignored but in the index already\n")
subprocess.check_output(["git", "add", "-f", "ignored-added-to-index"], cwd=gi |
t_repo_dir)
with open(f"{git_repo_dir}/untracked", "w") as untracked_f:
untracked_f.write("this file is untracked\n")
os.mkdir(f"{git_repo_dir}/subdir")
with open(f"{git_repo_dir}/subdir/untracked", "w") as untracked_f:
untracked_f.write("this file is untracked\n")
with open(f"{git_repo_dir}/subdir/untracked2", "w") as untracked_f:
untracked_f.write("this file is also untracked\n")
return git_repo_dir, to_rm
except Exception:
for rm_dir in to_rm:
shutil.rmtree(rm_dir)
raise
def run_test(repo_path, passed_files, filtered_files):
test_input = (
"\n".join(
passed_files
+ filtered_files
+ [f"./{f}" for f in passed_files]
+ [f"./{f}" for f in filtered_files]
)
+ "\n"
)
test_script_dir = f"{repo_path}/test-script-dir"
os.mkdir(test_script_dir)
filter_script_path = f"{test_script_dir}/filter_untracked.py"
test_script_dirname = os.path.dirname(__file__) or os.getcwd()
shutil.copy(
os.path.realpath(f"{test_script_dirname}/../../lint/filter_untracked.py"),
filter_script_path,
)
filter_proc = subprocess.Popen(
[sys.executable, filter_script_path],
cwd=repo_path,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
)
filter_output, _ = filter_proc.communicate(test_input)
filter_output_lines = [l for l in filter_output.split("\n") if l]
for pass_f in passed_files:
assert (
pass_f in filter_output_lines
), f"expected in filter output: {pass_f}\filter output: {filter_output}"
assert (
f"./{pass_f}" in filter_output_lines
), f"expected in filter output: ./{pass_f}\filter output: {filter_output}"
for filter_f in filtered_files:
assert (
filter_f not in filter_output_lines
), f"expected not in filter output: {filter_f}\nfilter_output: {fil |
ter_output}"
assert (
f"./{filter_f}" not in filter_output_lines
), f"expected not in filter output: ./{filter_f}\nfilter_output: {filter_output}"
assert len(filter_output_lines) == 2 * len(
passed_files
), f"expected {len(filter_output_lines)} == 2 * {len(passed_files)}"
def test_filter_untracked():
repo_path, to_rm = setup_git_repo()
try:
passed_files = [
"committed",
"committed-ignored",
"added-to-index",
"ignored-added-to-index",
]
filtered_files = [
"ignored",
"untracked",
"subdir/untracked",
"subdir/untracked2",
]
run_test(repo_path, passed_files, filtered_files)
finally:
for rm_dir in to_rm:
shutil.rmtree(rm_dir)
def test_worktree():
repo_path, to_rm = setup_git_repo(worktree=True)
try:
passed_files = [
"committed",
"committed-ignored",
"added-to-index",
"ignored-added-to-index",
]
filtered_files = [
"ignored",
"untracked",
"subdir/untracked",
"subdir/untracked2",
".git",
]
run_test(repo_path, passed_files, filtered_files)
finally:
for rm_dir in to_rm:
shutil.rmtree(rm_dir)
if __name__ == "__main__":
test_filter_untracked()
    test_worktree()
from numpy import isclose
import random
from tvm.autotvm import utils
SI_PREFIXES = "yzafpn\xb5m kMGTPEZY"
def test_format_si_prefix():
# test float conversion
assert utils.format_si_prefix(1024, "k") == 1.024
for i, prefix in enumerate(SI_PREFIXES):
integer, decimal = random.randint(0, 1000), random.randint(0, 1000)
exp = -24 + 3 * i # 0th prefix (yocto) is 10^-24
number = integer * (10**exp) + decimal * (10 ** (exp - 3))
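        # worked example: "k" sits at index 9, so exp == 3 and
        # number == integer * 1e3 + decimal; format_si_prefix divides by 1e3,
        # recovering `expected` == integer + decimal / 1000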
expected = integer + decimal / 1000
assert isclose(utils.format_si_prefix(number, prefix), expected)
assert utils.format_si_prefix(0, "y") == 0
if __name__ == "__main__":
    test_format_si_prefix()
"""Tests for gen_requirements, found in python/.""" |
import collections |
import contextlib |
import os |
import sys |
import tvm |
import tvm.testing |
import pytest
sys.path.insert(0, os.path.dirname(tvm.__file__))
try: |
import gen_requirements
finally:
sys.path.pop(0)
@contextlib.contextmanager
def patch(obj, **kw):
old = {}
for prop_name, new in kw.items():
old[prop_name] = getattr(obj, prop_name)
setattr(obj, prop_name, new)
yield
for prop_name, value in old.items():
setattr(obj, prop_name, value)
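# Usage sketch: temporarily override module-level attributes inside a with-block,
# e.g. `with patch(gen_requirements, REQUIREMENTS_BY_PIECE=...):`; the original
# values are restored when the block exits.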
PROBLEM_REQUIREMENTS = [
("extras-pre-core", ("", ["foo", 123])),
(456, ("", ["foo", "bar"])),
("core", ("", ["foo"])),
("wrong-description-type", (None, ["foo"])),
("bad-value", None),
("bad-value-2", ("", ["foo"], 34)),
("invalid", ("", ["qux"])),
("extras-foo", ("", ["bar", "baz"])),
("invalid", ("", ["baz", None, 123])),
("unsorted", ("", ["qux", "bar", "foo"])),
("versioned_dep", ("", ["baz==1.2", "foo==^2.0", "buz<3", "bar>4"])),
("duplicate_dep", ("", ["buz", "buz", "foo"])),
("dev", ("", ["baz", "qux"])),
("extras-post-dev", ("", ["bar", "buzz"])),
]
def test_validate_requirements():
with patch(gen_requirements, REQUIREMENTS_BY_PIECE=None):
assert gen_requirements.validate_requirements_by_piece() == [
"must be list or tuple, see None"
]
with patch(gen_requirements, REQUIREMENTS_BY_PIECE=PROBLEM_REQUIREMENTS):
problems = gen_requirements.validate_requirements_by_piece()
assert problems == [
'piece extras-pre-core: must list after "core" (core must be first)',
"piece extras-pre-core: deps should be a list of strings, got ['foo', 123]",
"piece 456: must be str",
"piece wrong-description-type: description should be a string, got None",
(
'piece bad-value: should be formatted like ("bad-value", ("<requirements.txt '
'comment>", ["dep1", "dep2", ...])). got: None'
),
(
'piece bad-value-2: should be formatted like ("bad-value-2", '
'("<requirements.txt comment>", ["dep1", "dep2", ...])). got: (\'\', ' |
"['foo'], 34)"
),
"piece invalid: listed twice",
"piece invalid: deps should be a list of strings, got ['baz', None, 123]",
"piece unsorted: deps must be sorted. Correct order:\n ['bar', 'foo', 'qux']",
"piece versioned_dep: deps must be sorted. Correct order:\n ['bar>4', 'baz==1.2', 'buz<3', 'foo==^2.0']",
"piece versioned_dep: dependency baz==1.2 should not specify a version. Add it to CONSTRAINTS instead.",
"piece versioned_dep: dependency foo==^2.0 should not specify a version. Add it to CONSTRAINTS instead.",
"piece versioned_dep: dependency buz<3 should not specify a version. Add it to CONSTRAINTS instead.",
"piece versioned_dep: dependency bar>4 should not specify a version. Add it to CONSTRAINTS instead.",
"piece duplicate_dep: dependency buz listed twice",
'piece extras-post-dev: must list before "dev" (dev must be last)',
'pieces other than "core" and "dev" must appear in alphabetical order: '
"['bad-value', 'bad-value-2', 'duplicate_dep', 'extras-foo', 'extras-post-dev', "
"'extras-pre-core', 'invalid', 'invalid', 'unsorted', 'versioned_dep', "
"'wrong-description-type']",
]
TEST_REQUIREMENTS_BY_PIECE = (
("core", ("core tvm requirements", ("bar", "foo", "non-constrained"))),
("extra-one", ("requirements for one feature", ("baz", "qux"))),
("extra-two", ("requirements for two feature", ("buz", "qux", "semver-minor", "semver-patch"))),
("dev", ("requirements for dev", ("buz", "oof", "rab"))),
)
def test_validate_constraints():
with patch(
gen_requirements,
REQUIREMENTS_BY_PIECE=TEST_REQUIREMENTS_BY_PIECE,
CONSTRAINTS=(
("unlisted", "~=3"),
("double-specified", "<2"),
(
"double-specified",
"==3",
),
("bad-constraint", "1.2.0"),
("bad-semver-constraint", "i |
don't match the regex :P"),
("alpha-semver-constraint", "^foo.bar.23"),
),
):
problems = gen_requirements.validate_constraints()
assert problems == [
"unlisted: not specified in REQUIREMENTS_BY_PIECE",
"double-specified: not specified in REQUIREMENTS_BY_PIECE",
"double-specified: specified twice",
"double-specified: not specified in REQUIREMENTS_BY_PIECE",
"bad-constraint: not specified in REQUIREMENTS_BY_PIECE",
'bad-constraint: constraint "1.2.0" does not look like a valid constraint',
"bad-semver-constraint: not specified in REQUIREMENTS_BY_PIECE",
'bad-semver-constraint: constraint "i don\'t match the regex :P" does not look like a valid constraint',
"alpha-semver-constraint: not specified in REQUIREMENTS_BY_PIECE",
"alpha-semver-constraint: invalid semver constraint ^foo.bar.23",
"CONSTRAINTS entries should be in this sorted order: ['alpha-semver-constraint', 'bad-constraint', 'bad-semver-constraint', 'double-specified', 'double-specified', 'unlisted']",
]
TEST_CONSTRAINTS = (
("bar", "==1.0"),
("baz", ">2.3"),
("buz", "^1.3.0"),
("non-constrained", None),
("oof", "==0.3.4"),
("qux", "~=1.2.4"),
("semver-minor", "^0.2.2-patch2.post3+buildmeta"),
("semver-patch", "^0.0.2+bm"),
)
def test_join_requirements():
with patch(
gen_requirements,
REQUIREMENTS_BY_PIECE=TEST_REQUIREMENTS_BY_PIECE,
CONSTRAINTS=TEST_CONSTRAINTS,
):
requirements = gen_requirements.join_requirements()
assert requirements == collections.OrderedDict(
[
("core", ("core tvm requirements", ["bar==1.0", "foo", "non-constrained"])),
("extra-one", ("requirements for one feature", ["baz>2.3", "qux~=1.2.4"])),
(
"extra-two",
(
"requirements for two feature", |
[
"buz>=1.3.0,<2.0.0",
"qux~=1.2.4",
"semver-minor>=0.2.2-patch2.post3+buildmeta,<0.3.0",
"semver-patch>=0.0.2+bm,<0.0.3",
],
),
),
("dev", ("requirements for dev", ["buz>=1.3.0,<2.0.0", "oof==0.3.4", "rab"])),
(
"all-prod",
(
"Combined dependencies for all TVM pieces, excluding dev",
[
"bar==1.0",
"baz>2.3",
"buz>=1.3.0,<2.0.0",
"foo",
"non-constrained",
"qux~=1.2.4",
"semver-minor>=0.2.2-patch2.post3+buildmeta,<0.3.0",
"semver-patch>=0.0.2+bm,<0.0.3",
],
),
),
]
)
def test_semver():
problems = []
assert gen_requirements.parse_semver("C", "^1.2.0", problems) == (["1", "2", "0"], 0, 1)
assert problems == []
assert gen_requirements.parse_semver("C", "^0.2.0", problems) == (["0", "2", "0"], 1, 2)
assert problems == []
assert gen_requirements.parse_semver("C", "^0.0.0", problems) == (["0", "0", "0"], 0, 0)
assert problems == []
assert gen_requirements.parse_semver("C", "^0.a.0", problems) == ([], 0, 0)
assert problems == ["C: invalid semver constraint ^0.a.0"]
if __name__ == "__main__":
    tvm.testing.main()
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.tir import IndexMap, IntImm, floordiv, floormod
from tvm.runtime import const
def assert_equal_index_map(map1: IndexMap, map2: IndexMap) -> None:
    """Assert that two index maps produce provably equal output indices."""
iters_1 = map1.map_indices(map2.initial_indices)
iters_2 = map2.final_indices
assert len(iters_1) == len(iters_2)
analyzer = tvm.arith.Analyzer()
for iter1, iter2 in zip(iters_1, iters_2):
assert analyzer.can_prove_equal(iter1, iter2)
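# Usage sketch (hypothetical maps): two lambdas describing the same transpose
# should satisfy this helper, e.g.
#   assert_equal_index_map(IndexMap.from_func(lambda i, j: [j, i]),
#                          IndexMap.from_func(lambda a, b: [b, a]))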
def test_index_mapping():
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
assert_structural_equal(index_map.map_indices([0]), [0, 0])
assert_structural_equal(index_map.map_indices([3]), [0, 3])
assert_structural_equal(index_map.map_indices([4]), [1, 0])
assert_structural_equal(index_map.map_indices([42]), [10, 2])
assert_structural_equal(
index_map.map_indices([const(42, "int64")]), [const(10, "int64"), const(2, "int64")]
)
def test_shape_mapping():
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
assert_structural_equal(index_map.map_shape([4]), [1, 4])
assert_structural_equal(index_map.map_shape([16]), [4, 4])
assert_structural_equal(index_map.map_shape([14]), [4, 4])
assert_structural_equal(
index_map.map_shape([const(16, "int64")]), [const(4, "int64"), const(4, "int64")]
)
assert_structural_equal(
index_map.map_shape([const(14, "int64")]), [const(4, "int64"), const(4, "int64")]
)
def test_inverse():
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
expected_inverse = IndexMap.from_func(lambda i, j: [4 * i + j])
assert index_map.inverse([16]).is_equivalent_to(expected_inverse)
def test_nonbijective_inverse_gives_error():
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
with pytest.raises(tvm.TVMError):
index_map.inverse([14])
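# Each case below pairs a forward map with its expected inverse, the shapes
# before/after the transform, and a predicate that is True exactly on the
# padded (out-of-range) positions introduced by the layout change.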
dynamic_N = tvm.tir.Var("N", "int32")
padding_test_case = tvm.testing.parameter(
by_dict={
"no_padding": dict(
            forward=lambda i: [i // 4, i % 4],
inverse=lambda i, j: [4 * i + j],
pre_shape=[16],
post_shape=[4, 4],
padding=lambda i, j: tvm.runtime.convert(False),
        ),
"right_padding": dict(
            forward=lambda i: [i // 4, i % 4],
inverse=lambda i, j: [4 * i + j],
pre_shape=[15],
post_shape=[4, 4],
padding=lambda i, j: tvm.tir.And(i == 3, tvm.runtime.convert(3) == j),
),
"left_padding": dict(
            forward=lambda i: [(i + 1) // 4, (i + 1) % 4],
inverse=lambda i, j: [4 * i + j - 1],
pre_shape=[15],
post_shape=[4, 4],
padding=lambda i, j: tvm.tir.And(i == 0, j < 1),
),
"left_and_right_padding": dict(
            forward=lambda i: [(i + 1) // 4, (i + 1) % 4],
inverse=lambda i, j: [4 * i + j - 1],
pre_shape=[14],
post_shape=[4, 4],
padding=lambda i, j: tvm.tir.Or(
tvm.tir.And(i == 0, j < 1),
tvm.tir.And(i == 3, tvm.runtime.convert(3) == j),
),
),
"dynamic_size": dict(
            forward=lambda i: [i // 4, i % 4],
inverse=lambda i, j: [4 * i + j],
pre_shape=[dynamic_N],
            post_shape=[(dynamic_N - dynamic_N % (-4)) // 4, 4],
padding=lambda i, j: tvm.tir.And(
dynamic_N % (-4) != 0,
                tvm.tir.And(i == dynamic_N // 4, j >= dynamic_N % 4),
),
),
"2d_padding": dict(
            forward=lambda i, j: [(i + 1) // 4, (j + 5) // 8, (i + 1) % 4, (j + 5) % 8],
inverse=lambda i_outer, j_outer, i_inner, j_inner: [
4 * i_outer + i_inner - 1,
8 * j_outer + j_inner - 5,
],
pre_shape=[14, 31],
post_shape=[
4,
5,
4,
8,
],
padding=lambda i_outer, j_outer, i_inner, j_inner: tvm.tir.Or(
tvm.tir.Or(
tvm.tir.And(i_outer == 0, i_inner < 1),
tvm.tir.And(i_outer == 3, tvm.runtime.convert(3) == i_inner),
),
tvm.tir.Or(
tvm.tir.And(j_outer == 0, j_inner < 5),
                    tvm.tir.And(j_outer == 4, j_inner >= 4),
),
),
),
"multiple_right_padding": dict(
            forward=lambda i: [i // 32, (i // 4) % 8, i % 4],
inverse=lambda i, j, k: [32 * i + 4 * j + k],
pre_shape=[116],
post_shape=[4, 8, 4],
padding=lambda i, j, k: tvm.tir.And(i == 3, 4 * j + k >= 20),
),
"multiple_right_padding_transpose": dict(
            forward=lambda i: [(i // 4) % 8, i // 32, i % 4],
inverse=lambda j, i, k: [32 * i + 4 * j + k],
pre_shape=[116],
post_shape=[8, 4, 4],
padding=lambda j, i, k: tvm.tir.And(i == 3, 4 * j + k >= 20),
),
"multiple_left_padding": dict(
            forward=lambda i: [(i + 5) // 32, ((i + 5) // 4) % 8, (i + 5) % 4],
inverse=lambda i, j, k: [32 * i + 4 * j + k - 5],
pre_shape=[123],
post_shape=[4, 8, 4],
padding=lambda i, j, k: tvm.tir.And(i == 0, j * 4 + k < 5),
),
"multiple_left_padding_with_transpose": dict(
            forward=lambda i: [((i + 5) // 4) % 8, (i + 5) // 32, (i + 5) % 4],
inverse=lambda j, i, k: [32 * i + 4 * j + k - 5],
pre_shape=[123],
post_shape=[8, 4, 4],
padding=lambda j, i, k: tvm.tir.And(i == 0, j * 4 + k < 5),
),
"outer_loop_extent_one": dict(
            forward=lambda i: [i // 4, i % 4],
inverse=lambda i, j: [i * 4 + j],
pre_shape=[3],
post_shape=[1, 4],
padding=lambda i, j: tvm.runtime.convert(3) == j,
),
}
)
def test_nonsurjective_inverse(padding_test_case):
index_map = IndexMap.from_func(padding_test_case["forward"])
inverse, padding_predicate = index_map.non_surjective_inverse(padding_test_case["pre_shape"])
expected_inverse = IndexMap.from_func(padding_test_case["inverse"])
assert inverse.is_equivalent_to(expected_inverse)
post_shape = index_map.map_shape(padding_test_case["pre_shape"])
tvm.ir.assert_structural_equal(post_shape, padding_test_case["post_shape"])
expected_predicate = padding_test_case["padding"](*inverse.initial_indices)
    analyzer = tvm.arith.Analyzer()
expected_predicate = analyzer.simplify(expected_predicate)
padding_predicate = analyzer.simplify(padding_predicate)
tvm.ir.assert_structural_equal(padding_predicate, expected_predicate)
def test_index_map_inverse_no_iter():
def input_example(i0, i1, i2, i3):
j0 = floordiv(i3, 32)
j1 = floordiv(i2, 2)
j2 = floormod(i2, 2)
j3 = floormod(i3, 32)
return j0, j1, j2, j3
def expected_inverse(i0, i1, i2, i3):
return IntImm("int32", 0), IntImm("int32", 0), i2 + i1 * 2, i3 + i0 * 32
index_map = IndexMap.from_func(input_example)
inverse_map = index_map.inverse([1, 1, 64, 64])
expected_map = IndexMap.from_func(expected_inverse)
assert expected_map.is_equivalent_to(inverse_map)
def test_map_ndarray():
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
inp = np.arange(16).astype("int8")
out = index_map.map_ndarray(tvm.nd.array(inp)).numpy()
ref = np.zeros(out.shape).astype("int8")
for i in range(16):
        ref[i // 4, i % 4] = inp[i]
np.testing.assert_equal(ref, out)
index_map = IndexMap.from_func(lambda i0, i1, i2, i3: (i3, i0, i1, i2))
inp = np.random.randn(10, 10, 10, 10).astype("float16")
out = index_map.map_ndarray(tvm.nd.array(inp)).numpy()
ref = np.transpose(inp, (3, 0, 1, 2))
np.testing.assert_equal(ref, out)
index_map = IndexMap.from_func(
lambda i0, i1, i2, i3: (
floordiv(i3, 32),
i0,
floordiv(i2, 8),
floordiv(floormod(i3, 32), 16),
i1,
floormod(i2, 8),
floormod(i3, 16),
)
)
kH = kW = 3
I = 64
O = 64
inp = np.random.randn(kH, kW, I, O).astype("float32")
arr = tvm.nd.array(inp)
out = index_map.map_ndarray(arr).numpy()
ref = np.zeros(out.shape).astype("float32")
for i0 in range(kH):
for i1 in range(kW):
for i2 in range(I):
for i3 in range(O):
                    v = inp[i0, i1, i2, i3]
                    ref[i3 // 32, i0, i2 // 8, (i3 % 32) // 16, i1, i2 % 8, i3 % 16] = v
np.testing.assert_equal(ref, out)
inverse_map = index_map.inverse(inp.shape)
np.testing.assert_equal(inverse_map.map_ndarray(index_map.map_ndarray(arr)).numpy(), inp)
if __name__ == "__main__":
    tvm.testing.main()
import tvm
import pytest
import tvm.ir._ffi_api
def test_make_attrs():
with pytest.raises(AttributeError):
x = tvm.ir.make_node("attrs.TestAttrs", unknown_key=1, name="xx")
with pytest.raises(AttributeError):
x = tvm.ir.make_node("attrs.TestAttrs", axis=100, name="xx")
x = tvm.ir.make_node("attrs.TestAttrs", name="xx", padding=(3, 4))
assert x.name == "xx"
assert x.padding[0].value == 3
assert x.padding[1].value == 4
assert x.axis == 10
def test_dict_attrs():
dattr = tvm.ir.make_node("DictAttrs", x=1, y=10, name="xyz", padding=(0, 0))
assert dattr.x.value == 1
    dattr = tvm.ir.load_json(tvm.ir.save_json(dattr))
assert dattr.name == "xyz"
assert isinstance(dattr, tvm.ir.DictAttrs)
assert "name" in dattr
assert dattr["x"].value == 1
assert len(dattr) == 4
assert len([x for x in dattr.keys()]) == 4
assert len(dattr.items()) == 4
def test_attrs_equal():
dattr0 = tvm.ir.make_node("DictAttrs", x=1, y=[10, 20])
dattr1 = tvm.ir.make_node("DictAttrs", y=[10, 20], x=1)
dattr2 = tvm.ir.make_node("DictAttrs", x=1, y=None)
assert tvm.ir.structural_equal(dattr0, dattr1)
assert not tvm.ir.structural_equal(dattr0, dattr2)
assert not tvm.ir.structural_equal({"x": 1}, tvm.runtime.convert(1))
assert not tvm.ir.structural_equal([1, 2], tvm.runtime.convert(1))
if __name__ == "__main__":
test_make_attrs()
test_dict_attrs()
    test_attrs_equal()
import pytest
import tvm
from tvm import te
import numpy as np
def test_array():
a = tvm.runtime.convert([1, 2, 3])
assert len(a) == 3
assert a[-1].value == 3
a_slice = a[-3:-1]
assert (a_slice[0].value, a_slice[1].value) == (1, 2)
def test_array_save_load_json():
a = tvm.runtime.convert([1, 2, 3])
json_str = tvm.ir.save_json(a)
a_loaded = tvm.ir.load_json(json_str)
assert a_loaded[1].value == 2
def test_dir_array():
a = tvm.runtime.convert([1, 2, 3])
assert dir(a)
def test_getattr_array():
a = tvm.runtime.convert([1, 2, 3])
assert getattr(a, "type_key") == "Array"
assert not hasattr(a, "test_key")
def test_map():
a = te.var("a")
b = te.var("b")
amap = tvm.runtime.convert({a: 2, b: 3})
assert a in amap
assert len(amap) == 2
dd = dict(amap.items())
assert a in dd
assert b in dd
assert a + 1 not in amap
assert {x for x in amap} == {a, b}
assert set(amap.keys()) == {a, b}
assert set(amap.values()) == {2, 3}
def test_str_map():
amap = tvm.runtime.convert({"a": 2, "b": 3})
assert "a" in amap
assert len(amap) == 2
dd = dict(amap.items())
assert amap["a"].value == 2
assert "a" in dd
assert "b" in dd
def test_map_save_load_json():
a = te.var("a")
b = te.var("b")
amap = tvm.runtime.convert({a: 2, b: 3})
json_str = tvm.ir.save_json(amap)
amap = tvm.ir.load_json(json_str)
assert len(amap) == 2
dd = {kv[0].name: kv[1].value for kv in amap.items()}
assert dd == {"a": 2, "b": 3}
def test_dir_map():
a = te.var("a")
b = te.var("b")
amap = tvm.runtime.convert({a: 2, b: 3})
assert dir(amap)
def test_getattr_map():
a = te.var("a")
b = te.var("b")
amap = tvm.runtime.convert({a: 2, b: 3})
assert getattr(amap, "type_key") == "Map"
assert not hasattr(amap, "test_key")
def test_in_container():
arr = tvm.runtime.convert(["a", "b", "c"])
assert "a" in arr
assert tvm.tir.StringImm("a") in arr
assert "d" not in arr
def test_ndarray_container():
    x = tvm.nd.array([1, 2, 3])
arr = tvm.runtime.convert([x, x])
assert arr[0].same_as(x)
assert arr[1].same_as(x)
assert isinstance(arr[0], tvm.nd.NDArray)
if __name__ == "__main__":
    pytest.main([__file__])
"""Test type nodes in the IR""" |
import tvm
def check_json_roundtrip(node):
json_str = tvm.ir.save_json(node)
back = tvm.ir.load_json(json_str)
assert tvm.ir.structural_equal(back, node, map_free_vars=True)
def test_prim_type():
x = tvm.ir.PrimType("int32")
assert isinstance(x, tvm.ir.PrimType)
assert x.dtype == "int32"
def test_tensor_type_bad_constructor():
try:
x = tvm.ir.TensorType("xx", "xx")
except tvm.error.TVMError:
pass
def test_tensor_type():
shape = tvm.runtime.convert([1, 2, 3])
dtype = "float32"
tt = tvm.ir.TensorType(shape, dtype)
assert tt.dtype == dtype
assert tt.shape == shape
assert tt.span == None
str(tt)
check_json_roundtrip(tt)
def test_type_param():
tp = tvm.ir.TypeVar("name", tvm.ir.TypeKind.Type)
assert tp.kind == tvm.ir.TypeKind.Type
str(tp)
check_json_roundtrip(tp)
def test_func_type():
type_params = tvm.runtime.convert([])
type_constraints = tvm.runtime.convert([])
arg_types = tvm.runtime.convert([])
ret_type = tvm.ir.TensorType((1, 2, 3), "float32")
tf = tvm.ir.FuncType(arg_types, ret_type, type_params, type_constraints)
assert tf.type_params == type_params
assert tf.type_constraints == type_constraints
assert tf.arg_types == arg_types
assert tf.ret_type == ret_type
assert tf.span == None
str(tf)
check_json_roundtrip(tf)
def test_tuple_type():
tp = tvm.ir.TypeVar("tp", tvm.ir.TypeKind.Type)
tf = tvm.ir.FuncType([], tvm.ir.TupleType([]), [], [])
tt = tvm.ir.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
fields = tvm.runtime.convert([tp, tf, tt])
tup_ty = tvm.ir.TupleType(fields)
assert tup_ty.fields == fields
str(tup_ty)
check_json_roundtrip(tup_ty)
def test_type_relation():
tp = tvm.ir.TypeVar("tp", tvm.ir.TypeKind.Type)
tf = tvm.ir.FuncType([], None, [], [])
tt = tvm.ir.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
args = tvm.runtime.convert([tp, tf, tt])
num_inputs = 2
    func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
attrs = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
tr = tvm.ir.TypeRelation(func, args, num_inputs, attrs)
assert tr.args == args
assert tr.num_inputs == num_inputs
str(tr)
check_json_roundtrip(tr)
if __name__ == "__main__":
test_tensor_type_bad_constructor()
test_tensor_type()
test_type_param()
test_func_type()
test_tuple_type()
    test_type_relation()
import collections
import ctypes
import json
import os
import re
from contextlib import redirect_stderr
from io import StringIO
import numpy as np
import tvm
import tvm.contrib.graph_executor
import tvm.relay
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm.contrib import utils
from tvm.relay.backend import Executor, Runtime
INPUT_SHAPE = (1, 3, 16, 16)
KERNEL_SHAPE = (3, 3, 3, 3)
linkable_dtype = tvm.testing.parameter(
*(
[f"uint{b}" for b in (8, 16, 32, 64)]
+ [f"int{b}" for b in (8, 16, 32, 64)]
+ ["float32", "float64"]
)
)
def dtype_info(dtype):
"""Lookup numpy type info for the given string dtype (of linkable_dtype params above)."""
if "int" in dtype:
return np.iinfo(getattr(np, dtype))
else:
return np.finfo(getattr(np, dtype))
RANDOM_TENSOR_START = None
def _make_random_tensor(dtype, shape):
"""Create a random test tensor with given shape and dtype."""
    global RANDOM_TENSOR_START
if RANDOM_TENSOR_START is not None:
to_return = np.arange(
RANDOM_TENSOR_START, RANDOM_TENSOR_START + np.prod(shape), dtype=dtype
).reshape(shape)
        RANDOM_TENSOR_START += np.prod(shape)
return to_return
dinfo = dtype_info(dtype)
if "int" in dtype:
return np.random.randint(dinfo.min, dinfo.max, shape, dtype=dtype)
else:
to_return = np.random.uniform(0, dinfo.max, shape).astype(dtype)
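        # flip the sign of every other element so negative values are exercised too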
np.reshape(to_return, np.prod(shape))[::2] *= -1
return to_return
def _lookup_sid(graph, name):
"""Lookup the storage id of a named parameter.
Arguments
---------
graph : dict
Parsed JSON graph.
name : str
Name of the tensor parameter to lookup.
Returns
-------
int :
The storage_id of the parameter.
"""
num_outputs_seen = 0
for i, n in enumerate(graph["nodes"]):
if n["name"] == name:
print("sid", name, graph["attrs"]["storage_id"][1], num_outputs_seen)
return graph["attrs"]["storage_id"][1][num_outputs_seen]
else:
if "attrs" in n and "num_outputs" in n["attrs"]:
num_outputs_seen += int(n["attrs"]["num_outputs"])
else:
num_outputs_seen += 1
raise KeyError(f"no such param: {name}")
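# Note (assumption about the graph-executor JSON layout): graph["attrs"]["storage_id"]
# holds a pair like ["list_int", [sid, ...]] with one entry per output tensor, which
# is why _lookup_sid reads element [1] and offsets by the outputs seen so far.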
def _get_ctypes_dtype(dt):
"""Return a ctype |
s c_* datatype given a string data type."""
if "int" in dt:
return getattr(ctypes, f"c_{dt}")
elif dt == "float32":
return ctypes.c_float
elif dt == "float64":
return ctypes.c_double
else:
assert False, f"unknown dtype: {dt}"
def _verify_linked_param(dtype, lib, mod, graph, name):
"""Directly read memory from the linked library to verify the linked parameter is correct."""
sid = _lookup_sid(graph, name)
param_ptr = mod.get_function("_lookup_linked_param", True)(sid)
gen_param = lib.params[name]
arr_data = (_get_ctypes_dtype(dtype) * np.prod(gen_param.shape)).from_address(param_ptr.value)
arr = np.ndarray(shape=gen_param.shape, dtype=gen_param.dtype, buffer=arr_data, order="C")
if "int" in gen_param.dtype:
np.testing.assert_equal(gen_param.numpy(), arr)
else:
np.testing.assert_allclose(gen_param.numpy(), arr)
return dtype == gen_param.dtype
def _make_mod_and_params(dtype):
"""Create a Relay module and parameters to test the given datatype."""
param_decls = collections.OrderedDict()
param_init = {}
def _add_decl(name, dtype):
param_decls[name] = f"%{name} : Tensor[{KERNEL_SHAPE}, {dtype}]"
param_init[name] = _make_random_tensor(dtype, KERNEL_SHAPE)
_add_decl(f"{dtype}_a", dtype)
_add_decl(f"{dtype}_b", dtype)
mod_lines = [
        '#[version = "0.0.5"]',
f"def @main(%rand_input : Tensor[{INPUT_SHAPE}, {dtype}], { ', '.join(param_decls.values()) } ) {{",
(
f' %0 = nn.conv2d(%rand_input, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
(
f' %1 = nn.conv2d(%0, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
(
f' %2 = nn.conv2d(%1, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
            f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
(
f' %3 = nn.conv2d(%2, %{dtype}_b, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
" %3",
"}",
]
mod = tvm.parser.fromtext("\n".join(mod_lines))
return mod, param_init
@tvm.testing.requires_llvm
def test_llvm_link_params(linkable_dtype):
ir_mod, param_init = _make_mod_and_params(linkable_dtype)
rand_input = _make_random_tensor(linkable_dtype, INPUT_SHAPE)
main_func = ir_mod["main"]
target = "llvm"
runtime = Runtime("crt", {"system-lib": True})
executor = Executor("graph", {"link-params": True})
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(ir_mod, target, runtime=runtime, executor=executor, params=param_init)
    temp_dir = utils.tempdir()
export_file = temp_dir / "lib.so"
lib.lib.export_library(export_file)
mod = tvm.runtime.load_module(export_file)
assert len(lib.params.keys()) == 0
assert mod.get_function("TVMSystemLibEntryPoint") != None
graph = json.loads(lib.graph_json)
for p in lib.params:
        _verify_linked_param(linkable_dtype, lib, mod, graph, p)
def _run_linked(lib, mod):
graph_json, _, _ = lib
graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0))
graph_rt.set_input("rand_input", rand_input)
graph_rt.run()
return graph_rt.get_output(0)
linked_output = _run_linked(lib, mod)
runtime = Runtime("cpp", {"system-lib": True})
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(ir_mod, "llvm", runtime=runtime, params=param_init)
def _run_unlinked(lib):
graph_json, mod, lowered_params = lib
graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0))
graph_rt.set_input("rand_input", rand_input, ** |
lowered_params)
graph_rt.run()
return graph_rt.get_output(0)
unlinked_output = _run_unlinked(lib)
if "int" in linkable_dtype:
np.testing.assert_equal(unlinked_output.numpy(), linked_output.numpy())
else:
np.testing.assert_allclose(unlinked_output.numpy(), linked_output.numpy())
def _get_c_datatype(dtype):
"""Translate LINKABLE_DTYPES element to c datatype."""
if "int" in dtype:
return f"{dtype}_t"
elif dtype == "float32":
return "float"
elif dtype == "float64":
return "double"
else:
assert False, f"unknown dtype {dtype}"
HEX_NUM_RE = re.compile(r"[+\-]?(?:(?:0x[0-9A-Fa-f.p+-]+)|(?:INFINITY)|(?:NAN))")
def test_c_link_params(linkable_dtype):
temp_dir = utils.tempdir()
mod, param_init = _make_mod_and_params(linkable_dtype)
rand_input = _make_random_tensor(linkable_dtype, INPUT_SHAPE)
main_func = mod["main"]
target = "c"
executor = Executor("graph", {"link-params": True})
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
lib = tvm.relay.build(mod, target, executor=executor, params=param_init)
assert len(lib.params.keys()) == 0
src = lib.lib.get_source()
lib.lib.save(temp_dir.relpath("test.c"), "c")
c_dtype = _get_c_datatype(linkable_dtype)
src_lines = src.split("\n")
param = param_init[f"{linkable_dtype}_a"].reshape(np.prod(KERNEL_SHAPE))
param_def = rf"^static const {c_dtype} __attribute__\(\(section\(\".rodata.tvm\"\), aligned\(16\)\)\) [a-zA-Z_0-9]*constant_\d+\[{np.prod(param.shape)}\] = {{$"
for i, line in enumerate(src_lines):
if re.match(param_def, line):
i += 1
break
else:
assert False, f'did not find parameter definition "{param_def}":\n{src}'
cursor = 0
width = dtype_info(linkable_dtype).bits
if linkable_dtype.startswith("int"):
width += 1
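    # scan the generated initializer body, counting one hex literal per element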
    while "};" not in src_lines[i]:
for match in HEX_NUM_RE.finditer(src_lines[i]):
cursor += 1
i += 1
assert cursor == np.prod(param.shape)
lib_path = temp_dir.relpath(f"test-{linkable_dtype}-linked.so")
lib["remove_params"]().export_library(lib_path)
lib_mod = tvm.runtime.load_module(lib_path)
graph = json.loads(lib.graph_json)
for p in lib.params:
_verify_linked_param(linkable_dtype, lib, lib_mod, graph, p)
def _run_linked(lib_mod):
graph_rt = tvm.contrib.graph_executor.GraphModule(lib_mod["default"](tvm.cpu(0)))
graph_rt.set_input("rand_input", rand_input)
graph_rt.run()
return graph_rt.get_output(0)
linked_output = _run_linked(lib_mod)
linked_params = lib.params
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
lib = tvm.relay.build(mod, "c", params=param_init)
_, _, params = lib
lib_path = temp_dir.relpath(f"test-{linkable_dtype}-unlinked.so")
lib.export_library(lib_path)
lib_mod = tvm.runtime.load_module(lib_path)
def _run_unlinked(lib_mod):
graph_rt = tvm.contrib.graph_executor.GraphModule(lib_mod["default"](tvm.cpu(0)))
graph_rt.set_input("rand_input", rand_input, **params)
graph_rt.run()
return graph_rt.get_output(0)
unlinked_output = _run_unlinked(lib_mod)
if "int" in linkable_dtype:
np.testing.assert_equal(unlinked_output.numpy(), linked_output.numpy())
else:
np.testing.assert_allclose(unlinked_output.numpy(), linked_output.numpy())
@tvm.testing.requires_micro
def test_crt_link_params(linkable_dtype):
    from tvm import micro
mod, param_init = _make_mod_and_params(linkable_dtype)
rand_input = _make_random_tensor(linkable_dtype, INPUT_SHAPE)
main_func = mod["main"]
target = "c"
runtime = Runtime("crt", {"system-lib": True})
executor = Executor("graph", {"link-params": True})
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
factory = tvm.relay.build(
mod, target, runtime=runtime, executor=executor, params=param_init
)
assert len(factory.get_params().keys()) == 0
temp_dir = tvm.contrib.utils.tempdir()
template_project_dir = os.path.join(tvm.micro.get_standalone_crt_dir(), "template", "host")
project = tvm.micro.generate_project(
template_project_dir, factory, temp_dir / "project", {"verbose": 1}
)
project.build()
project.flash()
with tvm.micro.Session(project.transport()) as sess:
graph_rt = tvm.micro.session.create_local_graph_executor(
factory.get_graph_json(), sess.get_system_lib(), sess.device
)
assert len(factory.params.keys()) == 0
graph_rt.set_input("rand_input", rand_input)
graph_rt.run()
linked_output = graph_rt.get_output(0).numpy()
runtime = Runtime("cpp", {"system-lib": True})
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(mod, "llvm", runtime=runtime, params=param_init)
def _run_unlinked(lib):
graph_json, mod, lowered_params = lib
graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0))
graph_rt.set_input("rand_input", rand_input, **lowered_params)
graph_rt.run()
return graph_rt.get_output(0).numpy()
unlinked_output = _run_unlinked(lib)
if "int" in linkable_dtype:
np.testing.assert_equal(unlinked_output, linked_output)
else:
np.testing.assert_allclose(unlinked_output, linked_output)
def test_tir_link_params():
def get_dense(data_shape, weight_shape):
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
dense = relay.nn.dense(data, weight)
return relay.Function([data, weight], dense)
def get_ref_dense(data_np, weight_np):
return np.dot(data_np, np.transpose(weight_np))
def schedule_dense(sch):
dense = sch.get_block("T_matmul_NT")
_y, _x, _k = sch.get_loops(dense)
M, N, K = 128, 128, 128
data_shape = (M, K)
weight_shape = (N, K)
relay_mod = tvm.IRModule.from_expr(get_dense(data_shape, weight_shape))
relay_mod = relay.transform.InferType()(relay_mod)
data_np = np.random.randn(*data_shape).astype("float32")
weight_np = np.random.randn(*weight_shape).astype("float32")
target = "llvm"
params = {"weight": weight_np}
def schedule_fn(sch):
if "nn_dense" in sch.mod.attrs["task_name"]:
schedule_dense(sch)
return True
return False
with StringIO() as stderr_buf, redirect_stderr(stderr_buf):
with ms.database.ScheduleFnDatabase(schedule_fn), tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_meta_schedule": True},
):
executor = Executor("graph", {"link-params": True})
lib = relay.build(relay_mod, target=target, executor=executor)
assert not "Cannot find workload" in stderr_buf.getvalue()
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input(**params)
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = get_ref_dense(data_np, weight_np)
tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
    tvm.testing.main()
import numpy as np
import tvm
from tvm import te
from tvm.ir.module import IRModule
from tvm.script import tir as T
import tvm.testing
def _check_module_with_numpy(mod, shape=(128, 128, 128)):
m, n, k = shape
a = tvm.nd.array(np.random.rand(m, k).astype("float32"))
b = tvm.nd.array(np.random.rand(n, k).astype("float32"))
c = tvm.nd.array(np.zeros((m, n), dtype="float32"))
c_np = np.dot(a.numpy(), b.numpy().transpose())
mod(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@tvm.script.ir_module
class LoweredModule:
@T.prim_func
def main(
A: T.Buffer[(16384,), "float32"],
B: T.Buffer[(16384,), "float32"],
C: T.Buffer[(16384,), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "from_legacy_te_schedule": True, "tir.noalias": True})
T.preflattened_buffer(A, [128, 128], data=A.data)
T.preflattened_buffer(B, [128, 128], data=B.data)
T.preflattened_buffer(C, [128, 128], data=C.data)
for x, y in T.grid(128, 128):
C[x * 128 + y] = 0.0
for k in T.serial(0, 128):
C[x * 128 + y] = C[x * 128 + y] + A[x * 128 + k] * B[y * 128 + k]
@tvm.script.ir_module
class LoweredTIRModule:
@T.prim_func
def main(
A: T.Buffer[(16384,), "float32"],
B: T.Buffer[(16384,), "float32"],
C: T.Buffer[(16384,), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
T.preflattened_buffer(A, [128, 128], data=A.data)
        T.preflattened_buffer(B, [128, 128], data=B.data)
T.preflattened_buffer(C, [128, 128], data=C.data)
for x, y in T.grid(128, 128):
C[x * 128 + y] = 0.0
for k in T.serial(0, 128):
C[x * 128 + y] = C[x * 128 + y] + A[x * 128 + k] * B[y * 128 + k]
def test_lower_build_te_schedule():
m, n, k = 128, 128, 128
axis_k = te.reduce_axis((0, k), "k")
A = te.placeholder((m, k), name="A")
B = te.placeholder((k, n), name="B")
C = te.compute((m, n), lambda x, y: te.sum(A[x, axis_k] * B[y, axis_k], axis=axis_k), name="C")
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
ir_mod = tvm.lower(s, [A, B, C])
tvm.ir.assert_structural_equal(ir_mod, LoweredModule)
mod = tvm.build(s, [A, B, C], target="llvm")
_check_module_with_numpy(mod)
def test_lower_build_tir_func():
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
ir_mod = tvm.lower(matmul)
tvm.ir.assert_structural_equal(ir_mod, LoweredTIRModule)
mod = tvm.build(matmul, target="llvm")
_check_module_with_numpy(mod)
def test_lower_build_tir_module():
func = matmul.with_attr("global_symbol", "main")
func = func.with_attr("tir.noalias", True)
ir_mod = IRModule({"main": func})
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
lowered_mod = tvm.lower(ir_mod)
tvm.ir.assert_structural_equal(lowered_mod, LoweredTIRModule)
mod = tvm.build(ir_mod, target="llvm")
_check_module_with_numpy(mod)
def test_lower_build_lowered_module():
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
ir_mod = tvm.lower(LoweredTIRModule)
tvm.ir.assert_structural_equal(ir_mod, LoweredTIRModule)
mod = tvm.build(ir_mod, target="llvm")
_check_module_with_numpy(mod)
if __name__ == "__main__":
test_lower_build_te_schedule()
    test_lower_build_tir_func()
test_lower_build_tir_module()
    test_lower_build_lowered_module()
from tvm.meta_schedule.arg_info import ArgInfo, TensorInfo
from tvm.script import tir as T
@T.prim_func
def Matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (128, 256), "float32")
B = T.match_buffer(b, (256, 512), "float32")
C = T.match_buffer(c, (128, 512), "float32")
for i, j, k in T.grid(128, 256, 512):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def test_meta_schedule_tensor_info_creation():
info = TensorInfo("float32", [1, 224, 224, 3])
info = str(info)
assert info == 'TensorInfo("float32", [1, 224, 224, 3])'
def test_meta_schedule_tensor_info_as_json():
info = TensorInfo("float32", [1, 224, 224, 3])
info = info.as_json()
assert info == ["TENSOR", "float32", [1, 224, 224, 3]]
def test_meta_schedule_tensor_info_from_json():
info = ["TENSOR", "float32", [1, 224, 224, 3]]
info = TensorInfo.from_json(info)
assert str(info) == 'TensorInfo("float32", [1, 224, 224, 3])'
def test_meta_schedule_arg_info_from_prim_func():
a_info, b_info, c_info = ArgInfo.from_prim_func(Matmul)
assert str(a_info) == 'TensorInfo("float32", [128, 256])'
assert str(b_info) == 'TensorInfo("float32", [256, 512])'
assert str(c_info) == 'TensorInfo("float32", [128, 512])'
if __name__ == "__main__":
test_meta_schedule_tensor_info_creation()
test_meta_schedule_tensor_info_as_json()
test_meta_schedule_tensor_info_from_json()
    test_meta_schedule_arg_info_from_prim_func()
""" Test Meta Schedule Builder """ |
import os |
import sys |
import time
from typing |
import List |
import pytest |
import tvm.testing
from tvm |
import script
from tvm._ffi |
import register_func
from tvm.meta_schedule.builder |
import (
BuilderInput,
BuilderResult,
LocalBuilder,
PyBuilder,
)
from tvm.runtime |
import Module
from tvm.script |
import tir as T
from tvm.target |
import Target
@script.ir_module
class MatmulModule:
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "matmul", "tir.noalias": True})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@script.ir_module
class MatmulReluModule:
@T.prim_func
def matmul_relu(
a: T.handle, b: T.handle, d: T.handle
) -> None:
T.func_attr({"global_symbol": "matmul_relu", "tir.noalias": True})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
D = T.match_buffer(d, (1024, 1024), "float32")
C = T.alloc_buffer((1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j in T.grid(1024, 1024):
with T.block("relu"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = T.max(C[vi, vj], 0.0)
@script.ir_module
class BatchMatmulModule:
@T.prim_func
def batch_matmul(
a: T.handle, b: T.handle, c: T.handle
) -> None:
T.func_attr({"global_symbol": "batch_matmul", "tir.noalias": True})
A = T.match_buffer(a, [16, 128, 128])
B = T.match_buffer(b, [16, 128, 128])
C = T.match_buffer(c, [16, 128, 128])
for n, i, j, k in T.grid(16, 128, 128, 128):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, |
k])
with T.init():
C[vn, vi, vj] = 0.0
C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
def _check_build_results(builder_results: List[BuilderResult]):
"""Simple check whether the build is successful"""
for result in builder_results:
artifact_path = result.artifact_path
error_msg = result.error_msg
assert artifact_path is not None
assert error_msg is None
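        # clean up the build artifact and the temporary directory that holds it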
os.remove(artifact_path)
os.rmdir(os.path.dirname(artifact_path))
def test_meta_schedule_single_build():
"""Test meta schedule builder for a single build"""
mod = MatmulModule
builder = LocalBuilder()
builder_inputs = [BuilderInput(mod, Target("llvm"))]
builder_results = builder.build(builder_inputs)
assert len(builder_results) == len(builder_inputs)
_check_build_results(builder_results)
def test_meta_schedule_multiple_build():
"""Test meta schedule builder for multiple builds"""
builder = LocalBuilder()
builder_inputs = [
BuilderInput(MatmulModule, Target("llvm")),
BuilderInput(MatmulReluModule, Target("llvm")),
BuilderInput(BatchMatmulModule, Target("llvm")),
]
builder_results = builder.build(builder_inputs)
assert len(builder_results) == len(builder_inputs)
_check_build_results(builder_results)
def test_meta_schedule_error_handle_test_builder():
"""Test the error handing during building""" |