class TestSimplifyRHSOfBooleanAndUsingLHSWithoutConst(BaseBeforeAfter):
"""Boolean expressions can introduce contexts.
Like TestSimplifyRHSOfBooleanAndUsingLHS, but with variables in
the conditions, preventing ConstIntBoundAnalyzer from handling it.
This proof requires the extension to transitively prove
inequalities.
"""
apply_constraints_to_boolean_branches = True
transitively_prove_inequalities = True
def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 5 and n < m + 10
def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 5
class TestSimplifyLHSOfBooleanAndUsingRHSWithoutConst(BaseBeforeAfter):
"""Boolean expressions can introduce contexts for their arguments.
Like TestSimplifyLHSOfBooleanAndUsingRHS, but with variables in
the conditions, preventing ConstIntBoundAnalyzer from handling it.
This proof requires the extension to transitively prove
inequalities.
"""
apply_constraints_to_boolean_branches = True
transitively_prove_inequalities = True
def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 10 and n < m + 5
def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 5
class TestSimplifyRHSOfBooleanOrUsingLHSWithoutConst(BaseBeforeAfter):
"""Boolean expressions can introduce contexts.
Like TestSimplifyRHSOfBooleanOrUsingLHS, but with variables in the
conditions, preventing ConstIntBoundAnalyzer from handling it.
This proof requires the extension to transitively prove
inequalities.
"""
apply_constraints_to_boolean_branches = True
transitively_prove_inequalities = True
def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 10 or n < m + 5
def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 10
class TestSimplifyLHSOfBooleanOrUsingRHSWithoutConst(BaseBeforeAfter):
"""Boolean expressions can introduce contexts for their arguments.
Like TestSimplifyLHSOfBooleanOrUsingRHS, but with variables in the
conditions, preventing ConstIntBoundAnalyzer from handling it.
This proof requires the extension to transitively prove
inequalities.
"""
apply_constraints_to_boolean_branches = True
transitively_prove_inequalities = True
def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 5 or n < m + 10
def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
A[0] = n < m + 10
class TestProvableConditionWithOffset(BaseBeforeAfter):
"""Use scoped-constraint to prove inequalities"""
transitively_prove_inequalities = False
def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32):
if i < j:
A[0] = i < j + 1
def expected(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32):
if i < j:
A[0] = True
class TestMostRestrictiveConditional(BaseBeforeAfter):
"""Preferentially prove part of a compound conditional.
Even if we cannot prove a conditional as true or false on its own,
proving that a conditional must satisfy a stronger condition may
allow for later rewrites. For example, if it is known that `a <= b`,
then `a >= b` cannot be proven, but can be reduced to `a == b`.
"""
i, j, k = [tvm.tir.Var(name, "int32") for name in "ijk"]
tir_int = tvm.tir.IntImm("int32", 0)
test_case = tvm.testing.parameter(
(i <= tir_int, tir_int <= i, i == tir_int),
(i <= tir_int, i != tir_int, i < tir_int),
(i != tir_int, i <= tir_int, i < tir_int),
(i != tir_int, tir_int <= i, tir_int < i),
(i <= j, j <= i, j == i),
(i <= j, i != j, i < j),
(i != j, i <= j, i < j),
(i != j, j <= i, j < i),
)
@tvm.testing.fixture
def before(self, test_case):
priors, expr_before, _ = test_case
@T.prim_func
def func(A: T.Buffer[1, "bool"]):
if priors:
A[0] = expr_before
return func
@tvm.testing.fixture
def expected(self, test_case):
priors, _, expr_after = test_case
@T.prim_func
def func(A: T.Buffer[1, "bool"]):
if priors:
A[0] = expr_after
return func
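# A minimal standalone sketch (illustrative, not part of the original test
# suite) of the scoped-constraint reasoning the tests above exercise, using
# tvm.arith.Analyzer directly: under the constraint `i < j`, the weaker
# condition `i < j + 1` becomes provable.
def _demo_scoped_constraint_proof():
    analyzer = tvm.arith.Analyzer()
    i = tvm.tir.Var("i", "int32")
    j = tvm.tir.Var("j", "int32")
    # Constraints registered via constraint_scope are only assumed inside
    # the `with` block, mirroring how a surrounding `if` scopes the proof.
    with analyzer.constraint_scope(i < j):
        return analyzer.can_prove(i < j + 1)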
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
import tvm.testing
@tvm.testing.requires_cuda
def test_split_host_device_func_attr():
m = te.size_var("m")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, xi = s[A2].split(A2.op.axis[0], factor=8)
s[A2].bind(xo, te.thread_axis("blockIdx.x"))
s[A1].compute_at(s[A2], xo)
s[A1].set_scope("shared")
mod = tvm.lower(s, [A, A2], name="f")
cuda_target = tvm.target.Target("cuda")
mod = tvm.tir.transform.Apply(
lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
)(mod)
fdevice = tvm.tir.transform.SplitHostDevice()(mod)["test_kernel0"]
assert fdevice.attrs["global_symbol"] == "test_kernel0"
assert fdevice.attrs["calling_conv"].value == 2
assert fdevice.attrs["target"] == cuda_target
assert fdevice.attrs["tir.is_global_func"].value
if __name__ == "__main__":
test_split_host_device_func_attr()
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
from tvm.relay import GlobalVar
def test_flatten2():
m = te.size_var("m")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, xi = s[A2].split(A2.op.axis[0], 8)
s[A1].compute_at(s[A2], xo)
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="A")
A2b = tvm.tir.decl_buffer(A2.shape, A2.dtype, name="A2")
mod = schedule_to_module(s, [Ab, A2b], binds={A: Ab, A2: A2b})
mod = tvm.tir.transform.StorageFlatten(64)(mod)
def test_flatten_prefetch():
A = te.placeholder((25, 100, 4), name="A")
_A = tvm.tir.decl_buffer(A.shape, A.dtype, name="A")
i = te.size_var("i")
j = te.size_var("j")
region = [tvm.ir.Range.from_min_extent(i[0], i[1]) for i in [(i, 2), (j, 8), (0, 4)]]
stmt = tvm.tir.Prefetch(_A, region)
func = tvm.te.schedule.SchedulePostProcToPrimFunc([_A], stmt, {A: _A})
mod = tvm.IRModule.from_expr(func)
mod = tvm.transform.Sequential(
[tvm.tir.transform.StorageFlatten(64), tvm.tir.transform.Simplify()]
)(mod)
stmt = mod["main"].body
assert stmt.extent.value == 2
assert isinstance(stmt.body, tvm.tir.For)
assert stmt.body.extent.value == 2
def assert_flat_loads(stmt):
if isinstance(stmt, tvm.tir.BufferLoad):
assert len(stmt.indices) == 1, "All prefetch indices should be flattened"
tvm.tir.stmt_functor.post_order_visit(stmt, assert_flat_loads)
def test_flatten_storage_align():
m = 8
l = 16
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
s[A1].storage_align(A1.op.axis[0], 2, 1)
mod = schedule_to_module(s, [A, A2])
mod = tvm.transform.Sequential(
[tvm.tir.transform.StorageFlatten(64), tvm.tir.transform.Simplify()]
)(mod)
stmt = mod["main"].body
assert stmt.extents[0].value == 17 * 8
def test_flatten_double_buffer():
@tvm.script.ir_module
class ModFromScript:
@T.prim_func
def main(A_param: T.handle, C_param: T.handle):
A = T.match_buffer(A_param, (400,), "float32", strides=[1])
C = T.match_buffer(C_param, (4,), "float32", strides=[1])
T.func_attr({"from_legacy_te_schedule": True})
threadIdx_x = T.env_thread("threadIdx.x")
T.launch_thread(threadIdx_x, 1)
for i in T.serial(0, 100):
B = T.decl_buffer([4], "float32", scope="shared")
with T.attr(B.data, "double_buffer_scope", 1):
for j in T.serial(0, 4):
B[j] = A[4 * i + j]
for j in T.serial(0, 4):
C[j] = B[j] + 1.0
mod = ModFromScript
with tvm.transform.PassContext(config={"tir.InjectDoubleBuffer": {"split_loop": 2}}):
mod = tvm.transform.Sequential(
[
tvm.tir.transform.StorageFlatten(64),
tvm.tir.transform.InjectDoubleBuffer(),
tvm.tir.transform.Simplify(),
]
)(mod)
stmt = mod["main"].body
assert isinstance(stmt.body, tvm.tir.Allocate)
assert list(stmt.body.extents) == [8]
mod = tvm.tir.transform.ThreadSync("shared")(mod)
f = mod["main"]
count = [0]
def count_sync(op):
if isinstance(op, tvm.tir.Call) and op.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync")):
count[0] += 1
tvm.tir.stmt_functor.post_order_visit(f.body, count_sync)
assert count[0] == 4
def test_flatten_let_buffer():
@tvm.script.ir_module
class module:
@T.prim_func
def main():
T.func_attr({"from_legacy_te_schedule": True})
A_data: T.Ptr[T.int32] = T.call_extern("dummy_extern_function", dtype="handle")
A = T.decl_buffer([1], dtype="float32", data=A_data)
T.evaluate(A[0])
tvm.tir.transform.StorageFlatten(64)(module)
@T.prim_func
def tir_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [2, 2])
B = T.match_buffer(a, [2, 2])
A[0, 1] = B[1, 1]
def test_flatten_tir():
orig_mod = tvm.IRModule({GlobalVar("main"): tir_func})
mod = tvm.tir.transform.StorageFlatten(64)(orig_mod)
tvm.ir.assert_structural_equal(
orig_mod, mod
)
if __name__ == "__main__":
tvm.testing.main()
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
def test_storage_share():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
num_stage = 5
B = A
for t in range(num_stage):
B = te.compute((m, l), lambda i, j: B[i, j] + (t + 1), name="A%d" % t)
s = te.create_schedule(B.op)
mod = schedule_to_module(s, [A, B])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
stmt = mod["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert num_alloc[0] == 1
def register_mem(scope_tb, max_bits):
@tvm.register_func("tvm.info.mem.%s" % scope_tb)
def mem_info_inp_buffer():
return tvm.ir.make_node(
"MemoryInfo", unit_bits=16, max_simd_bits=32, max_num_bits=max_bits, head_address=None
)
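# Example usage (hypothetical scope name): a test exercising a new
# scratchpad-like scope would first register its memory info so the bound
# checks in StorageRewrite apply, e.g.
#   register_mem("local.L0B", max_bits=1024 * 1024)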
def test_alloc_seq():
scope_tb = "local.L0A"
max_bits = 1024 * 1024 * 1024
register_mem(scope_tb, max_bits)
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i") as i:
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("float32", 200, name="A", scope=scope_tb)
A[j] = 1.2
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("float32", 200, name="B", scope=scope_tb)
A[j] = 1.3
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
assert n.extents[0].value == 200
tvm.tir.stmt_functor.post_order_visit(body, verify)
assert num_alloc[0] == 1
def test_alloc_different_dtypes():
def stmt_generater(dtype_list, length):
ib = tvm.tir.ir_builder.create()
base_dtype = dtype_list[0]
global_a = te.placeholder((length,), name="global_a", dtype=base_dtype)
assert len(dtype_list) == 4
with ib.for_range(0, length, name="j") as j:
dtype = dtype_list[0]
A = ib.allocate(dtype, length, name="A", scope="local.L0A")
A[j] = tvm.tir.const(1, dtype=dtype)
with ib.for_range(0, length, name="j") as j:
dtype = dtype_list[1]
B = ib.allocate(dtype, length, name="B", scope="local.L0A")
B[j] = tvm.tir.const(1, dtype=dtype)
with ib.for_range(0, length, name="j") as j:
dtype = dtype_list[2]
C = ib.allocate(dtype, length, name="C", scope="local.L0A")
C[j] = tvm.tir.const(1, dtype=dtype)
with ib.for_range(0, length, name="j") as j:
dtype = dtype_list[3]
D = ib.allocate(dtype, length, name="D", scope="local.L0A")
D[j] = tvm.tir.const(1, dtype=dtype)
with ib.for_range(0, length, name="j") as j:
dtype = "int8"
E = ib.allocate(dtype, length, name="E", scope="local.L0A")
E[j] = A[j].astype(dtype) + B[j].astype(dtype) + C[j].astype(dtype) + D[j].astype(dtype)
return ib.get()
def dtype_bit_len(dtype):
index = 0
for i in dtype:
if i.isdigit():
break
index += 1
return int(dtype[index:])
def offset_generater(dtype_list, length):
dtype_len_list = [dtype_bit_len(i) for i in dtype_list]
base_len = dtype_len_list[0]
return sum([i * length / base_len for i in dtype_len_list])
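# Worked example for the helpers above: dtype_bit_len("float16") == 16, so for
# dtype_list = ["float16", "int32", "uint16", "int8"] with length = 1024 and
# base_len = 16, the merged allocation holds
# (16 + 32 + 16 + 8) * 1024 / 16 = 4608 elements of the base dtype.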
def dtype_test(dtype_list, length):
def verify(n):
if isinstance(n, tvm.tir.Allocate):
assert n.extents[0].value == offset
body = stmt_generater(dtype_list, length)
offset = offset_generater(dtype_list, length)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
tvm.tir.stmt_functor.post_order_visit(body, verify)
length = 1024
dtype_list = ["float16", "int32", "uint16", "int8"]
dtype_test(dtype_list, length)
dtype_list = ["float32", "int32", "uint16", "int8"]
dtype_test(dtype_list, length)
dtype_list = ["float64", "int32", "uint16", "int8"]
dtype_test(dtype_list, length)
dtype_list = ["int8", "int32", "uint16", "uint8"]
dtype_test(dtype_list, length)
def test_inplace_rule():
m = 10
A = te.placeholder((m,), name="A")
A0 = te.compute((m,), lambda i: A[i], name="A0")
A1 = te.compute((m,), lambda i: A[i] + 1, name="A1")
AA = te.compute((m,), lambda i: A0[i] + A1[i] + A1[0], name="AA")
B = te.compute((m,), lambda i: AA[i] + 1, name="B")
s = te.create_schedule(B.op)
mod = schedule_to_module(s, [A, B])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
stmt = mod["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert num_alloc[0] == 2
def test_storage_combine():
n = 8
A = te.placeholder((4,), name="A")
num_stage = 5
B = A
stages = []
for t in range(num_stage):
B = te.compute((n,), lambda i: B[i] + B[0] + (t + 1), name="A%d" % t)
stages.append(B)
s = te.create_schedule(B.op)
for S in stages[:-1]:
s[S].set_scope("global:tag")
mod = schedule_to_module(s, [A, B])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
stmt = mod["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
assert n.extents[0].value == 16
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert num_alloc[0] == 1
def test_storage_combine_with_vectorization():
n = 1024
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute((n,), lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
AA = s.cache_read(A, "global:tag", readers=[C])
BB = s.cache_read(B, "global:tag", readers=[C])
CC = s.cache_write(C, "global:tag")
s[CC].vectorize(s[CC].op.axis[0])
mod = schedule_to_module(s, [A, B, C])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.VectorizeLoop()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
mod = tvm.tir.transform.Simplify()(mod)
stmt = mod["main"].body
num_alloc = [0]
def verify(v):
if (
isinstance(v, tvm.tir.Add)
and isinstance(v.a, tvm.tir.Load)
and isinstance(v.b, tvm.tir.Load)
):
lhs_ramp = v.a.index
rhs_ramp = v.b.index
assert lhs_ramp.lanes == n
assert rhs_ramp.lanes == n
assert lhs_ramp.base >= rhs_ramp.base + n or rhs_ramp.base >= lhs_ramp.base + n
elif isinstance(v, tvm.tir.Allocate):
num_alloc[0] += 1
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert num_alloc[0] == 1
def test_storage_share_gpu():
m = te.var("m")
A = [te.placeholder((m), name="A")]
num_stage = 5
for t in range(num_stage):
A.append(te.compute((m,), lambda i: A[-1][i] + (t + 1), name="A%d_s" % t))
A.append(te.compute((m,), lambda i: A[-1][i], name="A%d" % t))
s = te.create_schedule(A[-1].op)
for t in range(num_stage):
x = A[2 * t + 2].op.axis[0]
bx, tx = s[A[2 * t + 2]].split(x, factor=32)
s[A[2 * t + 2]].bind(bx, te.thread_axis("blockIdx.x"))
s[A[2 * t + 2]].bind(tx, te.thread_axis("threadIdx.x"))
s[A[2 * t + 1]].compute_at(s[A[2 * t + 2]], tx)
s[A[2 * t + 1]].set_scope("shared")
mod = schedule_to_module(s, [A[0], A[-1]])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
stmt = mod["main"].body
alloc_stats = {"global": 0, "shared": 0}
def verify(n):
if isinstance(n, tvm.tir.Allocate):
scope = n.buffer_var.type_annotation.storage_scope
alloc_stats[scope] += 1
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert alloc_stats["global"] == 2
assert alloc_stats["shared"] == num_stage
def test_parallel_alloc():
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i", kind="parallel") as i:
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("float32", n, name="A", scope="global")
A[j] = A[j] + 2
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"]
assert isinstance(body.body.body, tvm.tir.Allocate)
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="t") as i:
ib.scope_attr(
tvm.tir.const(1, "int32"), "pragma_scope", tvm.tir.StringImm("parallel_launch_point")
)
with ib.for_range(0, n, name="i", kind="parallel") as i:
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("float32", n, name="A", scope="global")
A[j] = A[j] + 2
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"]
assert isinstance(body.body.body.body.body, tvm.tir.Allocate)
def test_while_alloc():
def get_mod(kind="serial"):
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i", kind=kind) as i:
j = ib.allocate("int32", 1, name="j", scope="global")
j[0] = 0
with ib.while_loop(j[0] < 10):
A = ib.allocate("float32", n, name="A", scope="global")
A[j[0]] = A[j[0]] + 2
j[0] += j[0] + 1
body = ib.get()
return tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
mod = get_mod(kind="parallel")
body = tvm.tir.transform.StorageRewrite()(mod)["main"]
assert isinstance(body.body.body, tvm.tir.Allocate)
assert isinstance(body.body.body.body, tvm.tir.Allocate)
mod = get_mod(kind="serial")
body = tvm.tir.transform.StorageRewrite()(mod)["main"]
assert isinstance(body.body, tvm.tir.Allocate)
assert isinstance(body.body.body, tvm.tir.Allocate)
def test_inplace_rule2(scope_tb="local_TB2", max_bits=1024 * 1024 * 1024):
register_mem(scope_tb, max_bits)
m = 10
A = te.placeholder((m,), name="A")
C = te.placeholder((m,), name="C")
D = te.placeholder((m,), name="D")
A0 = te.compute((m,), lambda i: A[i] + C[i], name="A0")
A1 = te.compute((m,), lambda i: D[i] * D[i], name="A1")
A2 = te.compute((m,), lambda i: A0[i] + A1[i], name="A2")
B = te.compute((m,), lambda i: A2[i], name="B")
s = te.create_schedule(B.op)
A0L = s.cache_read(A0, scope_tb, [A2])
A1L = s.cache_read(A1, scope_tb, [A2])
A2L = s.cache_read(A2, scope_tb, [B])
mod = schedule_to_module(s, [A, B, C, D])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
stmt = mod["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert num_alloc[0] == 2
def test_exceed_mem():
max_bits = 639
loc = -1
try:
test_inplace_rule2("local_TEM", max_bits)
except Exception as e:
estr = str(e)
loc = estr.find("Allocation exceed bound of memory")
assert loc != -1
def test_inplace_rule3():
scope_tb = "local_TB3"
max_bits = 1024 * 1024 * 1024
register_mem(scope_tb, max_bits)
m = 10
B0 = te.placeholder((m,), name="B0")
B1 = te.placeholder((m,), name="B1")
B2 = te.placeholder((m,), name="B2")
B3 = te.placeholder((m,), name="B3")
B4 = te.placeholder((m,), name="B4")
B5 = te.placeholder((m,), name="B5")
B6 = te.compute((m,), lambda i: B1[i] * B5[i], name="B6")
B7 = te.compute((m,), lambda i: B2[i] * B4[i], name="B7")
B8 = te.compute((m,), lambda i: B6[i] - B7[i], name="B8")
B9 = te.compute((m,), lambda i: B2[i] * B3[i], name="B9")
B10 = te.compute((m,), lambda i: B0[i] * B5[i], name="B10")
B11 = te.compute((m,), lambda i: B9[i] - B10[i], name="B11")
B12 = te.compute((m,), lambda i: B0[i] * B4[i], name="B12")
B13 = te.compute((m,), lambda i: B1[i] * B3[i], name="B13")
B14 = te.compute((m,), lambda i: B12[i] - B13[i], name="B14")
B = te.compute((m,), lambda i: B8[i] * B11[i] + B14[i], name="B")
s = te.create_schedule(B.op)
B1L = s.cache_read(B1, scope_tb, [B6, B13])
B5L = s.cache_read(B5, scope_tb, [B6, B10])
B2L = s.cache_read(B2, scope_tb, [B7, B9])
B4L = s.cache_read(B4, scope_tb, [B7, B12])
B3L = s.cache_read(B3, scope_tb, [B9, B13])
B0L = s.cache_read(B0, scope_tb, [B10, B12])
B8L = s.cache_write(B8, scope_tb)
B11L = s.cache_write(B11, scope_tb)
B14L = s.cache_write(B14, scope_tb)
B6L = s.cache_write(B6, scope_tb)
B7L = s.cache_write(B7, scope_tb)
B9L = s.cache_write(B9, scope_tb)
B10L = s.cache_write(B10, scope_tb)
B12L = s.cache_write(B12, scope_tb)
B13L = s.cache_write(B13, scope_tb)
s[B12].compute_inline()
s[B13].compute_inline()
s[B8].compute_inline()
s[B11].compute_inline()
s[B14].compute_inline()
s[B6].compute_inline()
s[B7].compute_inline()
s[B9].compute_inline()
s[B10].compute_inline()
s = s.normalize()
mod = schedule_to_module(s, [B0, B1, B2, B3, B4, B5, B])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.StorageRewrite()(mod)
stmt = mod["main"].body
def verify(n):
if isinstance(n, tvm.tir.Allocate):
assert n.extents[0].value == 70
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
def test_alloc_seq_type():
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i") as i:
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("float32", 200, name="A", scope="local.L0A")
A1 = ib.allocate("float32", 200, name="A1", scope="local.L0A")
A[j] = 1.2
A1[j] = 1.3
B = ib.allocate("int16", 200, name="B", scope="local.L0A")
B[j] = tvm.tir.const(1, "int16")
C = ib.allocate("int16", 200, name="C", scope="local.L0A")
C[j] = tvm.tir.const(1, "int16")
D = ib.allocate("int16", 200, name="D", scope="local.L0A")
D[j] = B[j] + C[j]
A2 = ib.allocate("float32", 200, name="A2", scope="local.L0A")
A2[j] = A[j]
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
assert n.extents[0].value == 500
tvm.tir.stmt_functor.post_order_visit(body, verify)
assert num_alloc[0] == 1
def test_alloc_seq_type2():
scope_tb = "local.L0A2"
max_bits = 1024 * 1024 * 1024
register_mem(scope_tb, max_bits)
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i") as i:
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("float32", 200, name="A", scope=scope_tb)
A[j] = 1.2
with ib.for_range(0, 20, name="j") as j:
B = ib.allocate("int16", 400, name="B", scope=scope_tb)
B[j] = tvm.tir.const(1, "int16")
with ib.for_range(0, 10, name="j") as j:
C = ib.allocate("float32", 200, name="C", scope=scope_tb)
C[j] = 1.2
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
assert n.extents[0].value == 200
tvm.tir.stmt_functor.post_order_visit(body, verify)
assert num_alloc[0] == 1
def test_reuse_small_buffer():
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i") as i:
with ib.for_range(0, 10, name="j") as j:
A = ib.allocate("int16", 200, name="A", scope="local.L0A")
A[j] = tvm.tir.const(1, "int16")
B = ib.allocate("int16", 200, name="B", scope="local.L0A")
B[j] = tvm.tir.const(1, "int16")
B1 = ib.allocate("int16", 200, name="B1", scope="local.L0A")
B1[j] = A[j] + B[j]
C = ib.allocate("int16", 400, name="C", scope="local.L0A")
C[j] = tvm.tir.const(1, "int16")
D = ib.allocate("int16", 400, name="D", scope="local.L0A")
D[j] = tvm.tir.const(1, "int16")
E = ib.allocate("int16", 400, name="E", scope="local.L0A")
E[j] = C[j]
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
num_alloc = [0]
def verify(n):
if isinstance(n, tvm.tir.Allocate):
num_alloc[0] += 1
assert n.extents[0].value == 800
tvm.tir.stmt_functor.post_order_visit(body, verify)
assert num_alloc[0] == 1
def test_replace_dataflow():
shape = (255,)
A = te.placeholder(shape, name="A")
B = te.compute(shape, lambda i: A[i] + A[i], name="B")
C = te.compute(shape, lambda i: A[i] + B[i], name="C")
D = te.compute(shape, lambda i: A[i] + C[i], name="D")
E = te.compute(shape, lambda i: A[i] + D[i], name="E")
s = te.create_schedule(E.op)
s.cache_read(A, "local", [B, C, D, E])
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
def test_large_input():
@te.hybrid.script
def compute(a, b):
n = 16384
c = output_tensor((n, n), "int32")
for i in range(n):
for j in range(n):
c[i, j] = a[i, j] - b[i, j]
return c
n = 16384
shape = (n, n)
a = te.placeholder(shape, name="a", dtype="int32")
b = te.placeholder(shape, name="b", dtype="int32")
c = te.compute(shape, lambda i, j: compute(a, b)[i, j])
c = te.compute(shape, lambda i, j: 1 + c[i, j])
s = te.create_schedule(c.op)
stmt = tvm.lower(s, [a, b, c])["main"].body
def verify(n):
if isinstance(n, tvm.tir.Allocate):
assert n.extents[0].value == 268435456
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
def test_access_in_let_value():
@T.prim_func
def func(A: T.Buffer[(8,), "float32"]):
for i in range(8):
B_data = T.allocate((1,), "float32", "global")
B = T.buffer_decl(shape=[1], dtype="float32", data=B_data)
B[0] = 3.14
x: T.float32 = T.exp(B[0], dtype="float32")
A[i] = (x + 1.0) / (x - 1.0)
@T.prim_func
def func_rewritten(A: T.Buffer[(8,), "float32"]) -> None:
B_data = T.allocate((1,), "float32", "global")
B = T.buffer_decl(shape=[1], dtype="float32", data=B_data)
for i in range(8):
B[0] = 3.14
x: T.float32 = T.exp(B[0], dtype="float32")
A[i] = (x + 1.0) / (x - 1.0)
mod = tvm.tir.transform.StorageRewrite()(tvm.IRModule.from_expr(func))
tvm.ir.assert_structural_equal(mod["main"], func_rewritten)
class BaseCompare(tvm.testing.CompareBeforeAfter):
transform = tvm.tir.transform.StorageRewrite()
class TestLetBufferRewrite(BaseCompare):
"""StorageRewrite replaces the bound var of backing allocations
If StorageRewrite replaces the backing variable of an array, such
as when vectorizing the storage type, the variable must be
replaced in the LetStmt that defines it. Currently, StmtMutator
only visits usage of variables, and does not visit definitions of
variables, so the definition in a LetStmt must be explicitly
handled.
"""
def before() -> None:
A_data: T.Ptr[T.int32] = T.call_extern("dummy_func", dtype="handle")
A = T.buffer_decl([8], "int32", data=A_data)
A[0:8] = T.broadcast(42, 8)
def expected() -> None:
A_data: T.Ptr[T.int32x8] = T.call_extern("dummy_func", dtype="handle")
A = T.buffer_decl([1], "int32x8", data=A_data)
A[0] = T.broadcast(42, 8)
class TestRewriteInPlaceUseOfNonFlatBuffer(BaseCompare):
"""A non-flat buffer may be re-used for in-place operations"""
def before(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
B_data = T.allocate(
[16, 16],
dtype="float32",
scope="global",
)
B = T.buffer_decl(
[16, 16],
dtype="float32",
axis_separators=[1],
data=B_data,
)
C_data = T.allocate(
[16, 16],
dtype="float32",
scope="global",
)
C = T.buffer_decl(
[16, 16],
dtype="float32",
axis_separators=[1],
data=C_data,
)
for i, j in T.grid(16, 16):
B[i, j] = A[i, j]
for i, j in T.grid(16, 16):
C[i, j] = 2.0 * B[i, j]
for i, j in T.grid(16, 16):
D[i, j] = C[i, j]
def expected(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
B_data = T.allocate(
[16, 16],
dtype="float32",
scope="global",
)
B = T.buffer_decl([16, 16], dtype="float32", axis_separators=[1], data=B_data)
C = T.buffer_decl(
[16, 16],
dtype="float32",
axis_separators=[1],
data=B.data,
)
for i, j in T.grid(16, 16):
B[i, j] = A[i, j]
for i, j in T.grid(16, 16):
C[i, j] = 2.0 * B[i, j]
for i, j in T.grid(16, 16):
D[i, j] = C[i, j]
class TestNoRewriteOfSharedNonFlatBuffer(BaseCompare):
"""In general, sharing of non-flat buffer isn't supported
The current packing algorithms in StorageRewrite assume a flat
memory space, and do not support packing of N-d buffers. For
buffers with axis separators, normal buffer sharing should be
disabled.
Like TestRewriteInPlaceUseOfNonFlatBuffer, except that B and C do
not have matching shapes.
"""
def before(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
B_data = T.allocate(
[16, 16],
dtype="float32",
scope="global",
)
B = T.buffer_decl(
[16, 16],
dtype="float32",
axis_separators=[1],
data=B_data,
)
C_data = T.allocate(
[20, 20],
dtype="float32",
scope="global",
)
C = T.buffer_decl(
[20, 20],
dtype="float32",
axis_separators=[1],
data=C_data,
)
for i, j in T.grid(16, 16):
B[i, j] = A[i, j]
for i, j in T.grid(16, 16):
C[i, j] = 2.0 * B[i, j]
for i, j in T.grid(16, 16):
D[i, j] = C[i, j]
expected = before
if __name__ == "__main__":
tvm.testing.main()
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
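# run_passes mirrors the relevant slice of the lowering flow: flatten buffers,
# attach a CUDA target, split host and device functions, then run the
# ThreadSync pass under test to insert shared-memory barriers.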
def run_passes(func: tvm.tir.PrimFunc):
mod = tvm.IRModule.from_expr(func)
mod = tvm.tir.transform.StorageFlatten(64)(mod)
cuda_target = tvm.target.Target("cuda")
mod = tvm.tir.transform.Apply(
lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
)(mod)
mod = tvm.tir.transform.SplitHostDevice()(mod)
return tvm.tir.transform.ThreadSync("shared")(mod)
@tvm.testing.requires_cuda
def test_thread_storage_sync():
m = te.size_var("m")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, xi = s[A2].split(A2.op.axis[0], factor=8)
s[A2].bind(xo, te.thread_axis("blockIdx.x"))
s[A1].compute_at(s[A2], xo)
s[A1].set_scope("shared")
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, A2], stmt, None)
mod = run_passes(func)
f = mod["test_kernel0"]
body_list = tvm.tir.stmt_list(f.body.body.body)
assert body_list[1].value.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync"))
@tvm.testing.requires_cuda
def test_sync_else_branch():
def ir(A, B):
ib = tvm.tir.ir_builder.create()
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", 1)
local = ib.allocate(A.dtype, (8,), name="buf_local", scope="local")
shared = ib.allocate(A.dtype, (8,), name="buf_shared", scope="shared")
with ib.for_range(0, 8) as i:
with ib.if_scope(Aptr[i] < 0):
local[i] = Aptr[i]
with ib.else_scope():
shared[i] = Aptr[i]
with ib.for_range(0, 8) as i:
with ib.if_scope(Aptr[i] < 0):
Bptr[i] = local[i]
with ib.else_scope():
Bptr[i] = shared[i]
return ib.get()
A = tvm.tir.decl_buffer((8,), "float32")
B = tvm.tir.decl_buffer((8,), "float32")
stmt = ir(A, B)
func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, B], stmt, None)
mod = run_passes(func)
assert "@tir.tvm_storage_sync" in str(mod)
@tvm.testing.requires_cuda
def test_sync_read_thread_id_independent_location():
@T.prim_func
def func(p0: T.Buffer[2, "float32"], p1: T.Buffer[2, "float32"]) -> None:
threadIdx_x = T.env_thread("threadIdx.x")
blockIdx_x = T.env_thread("blockIdx.x")
T.preflattened_buffer(p0, [1, 2, 1, 1], dtype="float32", data=p0.data)
result_local = T.alloc_buffer([1], dtype="float32", scope="local")
temp_shared = T.alloc_buffer([1], dtype="float32", scope="shared")
T.launch_thread(blockIdx_x, 8)
T.launch_thread(threadIdx_x, 4)
result_local[0] = T.float32(0)
if threadIdx_x < 1:
temp_shared[0] = p0[0]
result_local[0] = result_local[0] + temp_shared[0] * p1[0]
if threadIdx_x < 1:
temp_shared[0] = p0[1]
result_local[0] = result_local[0] + temp_shared[0] * p1[1]
mod = run_passes(func)
assert "@tir.tvm_storage_sync" in str(mod)
if __name__ == "__main__":
test_thread_storage_sync()
test_sync_else_branch()
test_sync_read_thread_id_independent_location()
import pytest
import sys
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def _check(original, transformed):
mod = tvm.IRModule.from_expr(original)
mod = tvm.tir.transform.UnifyThreadBinding()(mod)
mod = tvm.tir.transform.Simplify()(mod)
tvm.ir.assert_structural_equal(mod["main"], transformed, True)
def _check_fail(original):
mod = tvm.IRModule.from_expr(original)
with pytest.raises(ValueError):
tvm.tir.transform.UnifyThreadBinding()(mod)
@T.prim_func
def element_wise_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i in T.thread_binding(0, 128, "blockIdx.x"):
for j0_0 in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
for j1_0 in T.thread_binding(0, 4, "threadIdx.x"):
for j1_1 in T.serial(0, 32):
with T.block(""):
C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0
@T.prim_func
def unified_element_wise_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
)
for j1_1 in T.serial(0, 32):
with T.block(""):
C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
)
@T.prim_func
def element_wise_thread_x_different_dtype(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for i in T.thread_binding(128, "blockIdx.x"):
for j0_0 in T.thread_binding(4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
for j1_0 in T.thread_binding(T.int64(4), "threadIdx.x"):
for j1_1 in T.serial(T.int64(32)):
with T.block(""):
C[i, j1_0 * T.int64(32) + j1_1] = B[i, j1_0 * T.int64(32) + j1_1] + 1.0
@T.prim_func
def unified_element_wise_thread_x_different_dtype(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for blockIdx_x in T.thread_binding(128, "blockIdx.x"):
for threadIdx_x in T.thread_binding(4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
)
for j1_1 in T.serial(T.int64(32)):
with T.block(""):
C[blockIdx_x, T.cast(threadIdx_x, "int64") * T.int64(32) + j1_1] = (
B[blockIdx_x, T.cast(threadIdx_x, "int64") * T.int64(32) + j1_1] + 1.0
)
@T.prim_func
def element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
j1_0 = T.env_thread("threadIdx.x")
j0_0 = T.env_thread("threadIdx.x")
i = T.env_thread("blockIdx.x")
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
T.launch_thread(i, 128)
T.launch_thread(j0_0, 4)
T.launch_thread(j1_0, 4)
for j0_1 in T.serial(0, 32):
with T.block(""):
B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
for j1_1 in T.serial(0, 32):
with T.block(""):
C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0
@T.prim_func
def unified_element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
)
for j1_1 in T.serial(0, 32):
with T.block(""):
C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
)
@T.prim_func
def element_wise_vthread_x(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
for i_0 in T.thread_binding(0, 2, "vthread.x"):
for i_1 in T.thread_binding(0, 64, "threadIdx.x"):
for j_0 in T.thread_binding(0, 2, "vthread.x"):
for j_1 in T.serial(0, 64):
with T.block(""):
B[i_0 * 64 + i_1, j_0 * 64 + j_1] = A[i_0 * 64 + i_1, j_0 * 64 + j_1] * 2.0
@T.prim_func
def unified_element_wise_vthread_x(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
for vthread_x in T.thread_binding(0, 2, "vthread.x"):
for threadIdx_x in T.thread_binding(0, 64, "threadIdx.x"):
for j_1 in T.serial(0, 64):
with T.block(""):
B[vthread_x * 64 + threadIdx_x, vthread_x * 64 + j_1] = (
A[vthread_x * 64 + threadIdx_x, vthread_x * 64 + j_1] * 2.0
)
@T.prim_func
def element_wise_two_thread_x_in_same_kernel_not_equal(
a: T.handle, b: T.handle, c: T.handle
) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 64])
for i in T.thread_binding(0, 128, "blockIdx.x"):
for j0 in T.thread_binding(0, 128, "threadIdx.x"):
B[i, j0] = A[i, j0] * 2.0
for j1 in T.thread_binding(0, 64, "threadIdx.x"):
C[i, j1] = A[i, j1] + 1.0
@T.prim_func
def element_wise_kernels_with_different_size(
a: T.handle, b: T.handle, c: T.handle, d: T.handle
) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [256, 256])
D = T.match_buffer(d, [256, 256])
for i0 in T.thread_binding(0, 128, "blockIdx.x"):
for j0 in T.thread_binding(0, 128, "threadIdx.x"):
B[i0, j0] = A[i0, j0] * 2.0
for i1 in T.thread_binding(0, 256, "blockIdx.x"):
for j1 in T.thread_binding(0, 256, "threadIdx.x"):
D[i1, j1] = C[i1, j1] + 1.0
@T.prim_func
def unified_element_wise_kernels_with_different_size(
a: T.handle, b: T.handle, c: T.handle, d: T.handle
) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [256, 256])
D = T.match_buffer(d, [256, 256])
for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
for threadIdx_x in T.thread_binding(0, 128, "threadIdx.x"):
B[blockIdx_x, threadIdx_x] = A[blockIdx_x, threadIdx_x] * 2.0
for blockIdx_x in T.thread_binding(0, 256, "blockIdx.x"):
for threadIdx_x in T.thread_binding(0, 256, "threadIdx.x"):
D[blockIdx_x, threadIdx_x] = C[blockIdx_x, threadIdx_x] + 1.0
@T.prim_func
def element_wise_implicit_block(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i in T.thread_binding(0, 128, "threadIdx.y"):
for j0_0 in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
for j1_0 in T.thread_binding(0, 4, "threadIdx.x"):
for j1_1 in T.serial(0, 32):
with T.block(""):
C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0
@T.prim_func
def unified_element_wise_implicit_block(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for blockIdx_x in T.thread_binding(0, 128, "threadIdx.y"):
for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
)
for j1_1 in T.serial(0, 32):
with T.block(""):
C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
)
def test_thread_x():
_check(element_wise_thread_x, unified_element_wise_thread_x)
def test_thread_x_different_dtype():
_check(element_wise_thread_x_different_dtype, unified_element_wise_thread_x_different_dtype)
def test_env_thread_x():
_check(element_wise_env_thread_x, unified_element_wise_env_thread_x)
def test_vthread_x():
_check(element_wise_vthread_x, unified_element_wise_vthread_x)
def test_two_thread_x_in_same_kernel_not_equal():
_check_fail(element_wise_two_thread_x_in_same_kernel_not_equal)
def test_kernels_with_different_size():
_check(
element_wise_kernels_with_different_size, unified_element_wise_kernels_with_different_size
)
def test_implicit_block():
_check(element_wise_implicit_block, unified_element_wise_implicit_block)
def test_lower_te():
a = te.placeholder((32, 2, 2))
b = te.compute((32, 2, 2), lambda i, j, k: a[i, j, k] * 2.0)
s = te.create_schedule(b.op)
s[b].bind(b.op.axis[1], te.thread_axis("threadIdx.x"))
s[b].bind(b.op.axis[2], te.thread_axis("threadIdx.x"))
orig_mod = tvm.driver.build_module.schedule_to_module(s, [a, b])
mod = tvm.tir.transform.UnifyThreadBinding()(orig_mod)
tvm.ir.assert_structural_equal(mod, orig_mod)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
from tvm.script import tir as T
import os
def test_unroll_loop():
ib = tvm.tir.ir_builder.create()
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
Aptr = ib.buffer_ptr(Ab)
with ib.for_range(n, n + 2, name="i") as i:
with ib.for_range(0, 8, name="i", kind="unroll") as j:
Aptr[j + 1] = Aptr[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt))
assert isinstance(stmt, tvm.tir.For)
with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 16}}):
ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
assert not isinstance(ret, tvm.tir.For)
with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 15}}):
ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
assert isinstance(ret, tvm.tir.For)
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 16, "explicit_unroll": False}}
):
ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
assert isinstance(ret, tvm.tir.For)
assert ret.kind == tvm.tir.ForKind.UNROLLED
ib = tvm.tir.ir_builder.create()
ib.scope_attr(tvm.tir.const(0, "int32"), "pragma_auto_unroll_max_step", 16)
ib.emit(stmt)
wrapped = ib.get()
wrapped = tvm.tir.SeqStmt([wrapped, stmt])
assert isinstance(ret, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], wrapped))
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_depth": 8, "explicit_unroll": False}}
):
ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
assert isinstance(ret[0], tvm.tir.For)
assert ret[0].kind == tvm.tir.ForKind.UNROLLED
assert isinstance(ret[1], tvm.tir.For)
assert ret[1].kind != tvm.tir.ForKind.UNROLLED
def test_unroll_fake_loop():
ib = tvm.tir.ir_builder.create()
dtype = "int32"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
Aptr = ib.buffer_ptr(Ab)
with ib.for_range(0, 1, name="i") as i:
Aptr[i * 2] = 3
with ib.for_range(0, 10, name="j") as j:
Aptr[j + 1] = Aptr[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt))
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {"auto_max_depth": 8, "auto_max_extent": 1, "explicit_unroll": False}
}
):
ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
assert isinstance(ret[0], tvm.tir.BufferStore)
def test_unroll_single_count_loops():
n = te.size_var("n")
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda *i: A(*i), name="B")
s = te.create_schedule(B.op)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 1}}):
ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
assert ret == stmt
def test_unroll_allocations():
@tvm.script.ir_module
class before:
@T.prim_func
def main():
for i in T.unroll(2):
with T.decl_buffer([16], "float32") as buf:
buf[0] = 0.0
@tvm.script.ir_module
class expected:
@T.prim_func
def main():
with T.decl_buffer([16], "float32") as buf1:
buf1[0] = 0.0
with T.decl_buffer([16], "float32") as buf2:
buf2[0] = 0.0
after = tvm.tir.transform.UnrollLoop()(before)
tvm.ir.assert_structural_equal(after, expected)
if __name__ == "__main__":
test_unroll_loop()
test_unroll_fake_loop()
test_unroll_single_count_loops()
test_unroll_allocations()
import tvm
from tvm import te
def test_vectorize_loop():
dtype = "int64"
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, kind="vectorize") as j:
A[j] = tvm.tir.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
assert not isinstance(stmt.body, tvm.tir.For)
assert len(stmt.body.indices) == 1
assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_vector():
dtype = "int64"
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32x4", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, kind="vectorize") as j:
A[j] = tvm.tir.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
assert not isinstance(stmt.body, tvm.tir.For)
assert len(stmt.body.indices) == 1
assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_with_if():
n = te.var("n")
x = te.var("x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(x < n):
A[i] = A[i] + 1
with ib.else_scope():
with ib.if_scope(i < n):
A[i] = 2.0
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.IfThenElse)
assert len(stmt.then_case.indices) == 1
assert isinstance(stmt.then_case.indices[0], tvm.tir.Ramp)
assert isinstance(stmt.then_case.value, tvm.tir.Add)
assert stmt.then_case.value.dtype == "float32x4"
assert isinstance(stmt.else_case, tvm.tir.For)
def test_vectorize_with_if_cond_int64():
m = te.size_var("m", dtype="int64")
A = te.placeholder((m,), name="A", dtype="float32")
B = te.compute((m,), lambda i: te.if_then_else(i < 2, A[i], A[i] * 2), name="B")
s = te.create_schedule(B.op)
x, y = s[B].split(B.op.axis[0], factor=4)
s[B].vectorize(y)
f = tvm.build(s, [A, B], "llvm")
def test_vectorize_let():
v = tvm.tir.Var("v", "float32")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
ib.emit(lambda body: tvm.tir.LetStmt(v, A[i] + 1, body))
A[i] = v + 2
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], ib.get()))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.LetStmt)
assert stmt.value.dtype == "float32x4"
def test_vectorize_with_le_cond():
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(i <= n):
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
def test_vectorize_with_ge_cond():
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(i >= n):
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
def test_vectorize_if_then_else():
n = te.var("n")
x = te.var("x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
A[i] = tvm.tir.call_intrin("float32", "tir.if_then_else", i > 0, A[i] + 1, A[i])
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as k:
with ib.for_range(0, 4, kind="vectorize") as i:
A[k * 4 + i] = tvm.tir.call_intrin(
"float32", "tir.if_then_else", k > 0, A[k * 4 + i], 0
)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert not isinstance(stmt.body, tvm.tir.For)
assert isinstance(stmt.body.value.args[2], tvm.tir.Broadcast)
def test_vectorize_while_fail():
"""A while loop inside a vectorized loop should fail."""
n = 64
num_iter = 10
def test_ir(A, B, C):
ib = tvm.tir.ir_builder.create()
n = C.shape[0]
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
C = ib.buffer_ptr(C)
i = ib.allocate("int32", (1,), name="i", scope="local")
i[0] = 0
with ib.for_range(0, n) as j:
C[j] = 0.0
with ib.for_range(0, n, kind="vectorize") as j:
with ib.while_loop(i[0] < num_iter):
C[j] += A[j] + B[j]
i[0] += 1
return ib.get()
dtype = "float32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.placeholder((n,), name="B", dtype=dtype)
C = te.extern(
(n,),
[A, B],
lambda ins, outs: test_ir(ins[0], ins[1], outs[0]),
name="while_vectorize",
dtype=dtype,
)
s = te.create_schedule(C.op)
try:
tvm.lower(s, [A, B, C], "llvm")
assert False
except tvm.error.TVMError as e:
error_msg = str(e).split("\n")[-1]
expected = "A while loop inside a vectorized loop not supported"
assert expected in error_msg
def test_vectorize_dtype_mismatch():
n = tvm.tir.IntImm("int64", 4)
A = te.compute((n,), lambda i: tvm.tir.IntImm("int64", 2**31 - 1) + i, name="A")
s = te.create_schedule(A.op)
s[A].vectorize(A.op.axis[0])
tvm.lower(s, [A], "llvm", simple_mode=True)
if __name__ == "__main__":
test_vectorize_vector()
test_vectorize_with_if()
test_vectorize_loop()
test_vectorize_if_then_else()
test_vectorize_with_le_cond()
test_vectorize_with_ge_cond()
test_vectorize_let()
test_vectorize_while_fail()
test_vectorize_dtype_mismatch()
import pytest
import tvm
from tvm import tir, script
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _replace_stmt_with_buf_var_names(buffer_info_map):
"""helper to replace tir.allocates with buffer names"""
new_buffer_info_map = dict()
for k, v in buffer_info_map.items():
new_buffer_info_map[v.buffer_var.name] = k
return new_buffer_info_map
def _verify_conflicts(main_buf_name, conflicting_buf_names, buffer_info_map):
"""helper to check expected liveness conflicts"""
buf_info = buffer_info_map[main_buf_name]
for conflict in buf_info.conflicts:
assert conflict.name_hint in conflicting_buf_names
def _get_allocates(primfunc):
"""helper to extract all allocate nodes by name"""
allocates = dict()
def get_allocate(stmt):
if isinstance(stmt, tvm.tir.Allocate):
allocates[str(stmt.buffer_var.name)] = stmt
stmt_functor.post_order_visit(primfunc.body, get_allocate)
return allocates
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
"""helper to assing poolinfos to allocate nodes in a tir.PrimFunc"""
def set_poolinfos(stmt):
if isinstance(stmt, tvm.tir.Allocate):
return tvm.tir.Allocate(
buffer_var=stmt.buffer_var,
dtype=stmt.dtype,
extents=stmt.extents,
condition=stmt.condition,
body=stmt.body,
annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
)
return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
"""helper to assing poolinfos to allocate nodes in a IRModule"""
ret = tvm.IRModule()
for global_var, basefunc in mod.functions.items():
if isinstance(basefunc, tvm.tir.PrimFunc):
ret[global_var] = _assign_poolinfos_to_allocates_in_primfunc(basefunc, pool_infos)
return ret
def _assign_targets_to_primfuncs_irmodule(mod, target):
"""helper to assign target for PrimFunc |
in a IRModule"""
ret = tvm.IRModule()
for global_var, basefunc in mod.functions.items():
if isinstance(basefunc, tvm.tir.PrimFunc):
ret[global_var] = basefunc.with_attr("target", target)
return ret
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size):
max_workspace_size = 0
for buffer_info, pool_allocation in buffer_pool_allocations.items():
if pool_allocation.pool_info == pool_info:
size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
if size_candidate > max_workspace_size:
max_workspace_size = size_candidate
assert max_workspace_size == size
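# The pool's workspace size is the maximum over its allocations of
# (byte_offset + size_bytes); e.g. a 40-byte buffer placed at byte offset 100
# implies a pool of at least 140 bytes.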
def test_no_pool_error():
target = Target("c")
tiny_workspace_pool = WorkspacePoolInfo(
"tiny_workspace",
[target],
PoolInfoProperties(size_hint_bytes=10),
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[tiny_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=10, pool_candidates=[tiny_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=10, pool_candidates=[tiny_workspace_pool]
)
bi_a.set_conflicts([bi_b])
bi_b.set_conflicts([bi_c])
bi_c.set_conflicts([bi_a])
buffer_info_arr = [bi_a, bi_b, bi_c]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.greedy_by_size")
with pytest.raises(
tvm.TVMError, match="TVM USMP Error: the space available in the provided pools exceeded"
):
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
@pytest.mark.parametrize("algorithm", ["greedy_by_size", "greedy_by_conflicts", "hill_climb"])
def test_name_based_ordering(algorithm):
"""This checks when the size and conlicts are same a stable result is generated"""
def _test():
target = Target("c")
global_workspace_pool = WorkspacePoolInfo(
"global_workspace",
[target],
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_a.set_conflicts([bi_b, bi_c])
bi_b.set_conflicts([bi_c, bi_a])
bi_c.set_conflicts([bi_a, bi_b])
buffer_info_arr = [bi_a, bi_b, bi_c]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
assert buffer_pool_allocations[bi_a].byte_offset == 20
assert buffer_pool_allocations[bi_b].byte_offset == 10
assert buffer_pool_allocations[bi_c].byte_offset == 0
for x in range(0, 10):
_test()
@pytest.mark.parametrize(
["algorithm", "workspace_size"],
[("greedy_by_size", 140), ("greedy_by_conflicts", 140), ("hill_climb", 140)],
)
def test_linear(algorithm, workspace_size):
"""
The test case here represents BufferInfo objects
that could be generated for a linear sequence
such as:
(Op A)
|
bi_a
|
(Op B)
|
bi_b
|
.
.
.
(Op F)
|
bi_f
"""
target = Target("c")
global_workspace_pool = WorkspacePoolInfo(
"global_workspace",
[target],
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
)
bi_d = usmp_utils.BufferInfo(
name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
)
bi_e = usmp_utils.BufferInfo(
name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
)
bi_f = usmp_utils.BufferInfo(
name_hint="bi_f", size_bytes=50, pool_candidates=[global_workspace_pool]
)
bi_a.set_conflicts([bi_b])
bi_b.set_conflicts([bi_a, bi_c])
bi_c.set_conflicts([bi_b, bi_d])
bi_d.set_conflicts([bi_c, bi_e])
bi_e.set_conflicts([bi_d, bi_f])
bi_f.set_conflicts([bi_e])
buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
_check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
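# For the linear chain above only adjacent buffers are live at once, so the
# expected pool size is the largest conflicting pair:
# bi_c (100) + bi_d (40) = 140 bytes, for all three algorithms.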
@pytest.mark.parametrize(
["algorithm", "workspace_size"],
[("greedy_by_size", 190), ("greedy_by_conflicts", 320), ("hill_climb", 190)],
)
def test_fanout(algorithm, workspace_size):
"""
    The test case here represents BufferInfo objects
    that could get generated for a fanout topology
    such as:
(Op A)
|
bi_a ---------
| |
(Op B) (Op C)
| |
bi_b bi_c
| |
(Op D) (Op E)
| |
bi_d bi_e
| |
(Op F) ------
|
bi_f
|
(Op G)
|
bi_g
"""
target = Target("c")
global_workspace_pool = WorkspacePoolInfo(
"global_workspace",
targets=[target],
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
)
bi_d = usmp_utils.BufferInfo(
name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
)
bi_e = usmp_utils.BufferInfo(
name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
)
bi_f = usmp_utils.BufferInfo(
name_hint="bi_ |
f", size_bytes=60, pool_candidates=[global_workspace_pool]
)
bi_g = usmp_utils.BufferInfo(
name_hint="bi_g", size_bytes=70, pool_candidates=[global_workspace_pool]
)
bi_a.set_conflicts([bi_b, bi_c])
bi_b.set_conflicts([bi_a, bi_c, bi_e])
bi_c.set_conflicts([bi_e, bi_a, bi_b, bi_d])
bi_d.set_conflicts([bi_b, bi_f, bi_c, bi_e])
bi_e.set_conflicts([bi_c, bi_f, bi_b, bi_d])
bi_f.set_conflicts([bi_d, bi_e, bi_f])
bi_g.set_conflicts([bi_f])
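    # With this fanout topology the algorithms legitimately diverge:
    # greedy_by_size and hill_climb reach a 190-byte packing, while
    # greedy_by_conflicts settles for a looser 320-byte layout, as encoded
    # in the parametrized workspace_size values above.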
buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f, bi_g]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
_check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
@tvm.script.ir_module
class MobilenetStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
T_subtract_1 = T.match_buffer(T_subtract, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
for ax0_ax1_fused_1 in T.serial(0, 224):
for ax2_1, ax3_inner_1 in T.grid(224, 3):
T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast |
", "tir.noalias": True})
placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_21 = T.match_buffer(T_cast_20, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
PaddedInput_7 = T.decl_buffer([157323], "int16")
for i0_i1_fused_7 in T.serial(0, 229):
for i2_7, i3_7 in T.grid(229, 3):
PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
Conv2dOutput_7 = T.decl_buffer([64], "int32")
for ff_3 in T.serial(0, 64):
Conv2dOutput_7[ff_3] = 0
for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
for ax3_inner_7 in T.serial(0, 64):
T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
@T.prim_func
def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
T_cast_7 = T.match_buffer(T_cast_6, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
tensor_2 = T.decl_buffer([200704], "uint8")
for ax0_ax1_fused_4 in T.serial(0, 56):
for ax2_4 in T.serial(0, 56):
for ax3_init in T.serial(0, 64):
tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
for ax0_ax1_fused_5 in T.serial(0, 56):
for ax2_5, ax3_3 in T.grid(56, 64):
T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
@T.prim_func
def run_model(input: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
T.attr("default", "device_id", 0)
T.attr("default", "device_type", 1)
sid_9 = T.allocate([301056], "int8", "global")
sid_8 = T.allocate([802816], "int8", "global")
T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_de |
fault_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
__tvm_meta__ = None
@pytest.mark.parametrize(
["algorithm", "fast_memory_size", "slow_memory_size"],
[
("greedy_by_size", 200704, 1418528),
("greedy_by_conflicts", 200704, 1418528),
("hill_climb", 200704, 1117462),
],
)
def test_mobilenet_subgraph(algorithm, fast_memory_size, slow_memory_size):
target = Target("c")
fast_memory_pool = WorkspacePoolInfo(
"fast_memory",
[target],
PoolInfoProperties(size_hint_bytes=200704),
)
slow_memory_pool = WorkspacePoolInfo(
"slow_memory",
[target],
)
tir_mod = MobilenetStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
tir_mod, [fast_memory_pool, slow_memory_pool]
)
main_func = tir_mod["run_model"]
buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
assert buffer_info_analysis.memory_pressure == 1117718
fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)
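    # Index the BufferInfo objects by name so individual liveness conflicts
    # can be checked against the expected sets below.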
buffer_info_map_names = dict()
for buf_info in buffer_info_arr:
buffer_info_map_names[buf_info.name_hint] = buf_info
_verify_conflicts("PaddedInput_7", ["sid_9", "sid_8", "Conv2dOutput_7"], buffer_info_map_names)
_verify_conflicts("tensor_2", ["sid_8"], buffer_info_map_names)
_verify_conflicts("sid_9", ["PaddedInput_7"], buffer_info_map_names)
_verify_conflicts(
"sid_8", ["PaddedInput_7", "Conv2dOutput_7", "tensor_2"], buffer_info_map_names
)
_verify_conflicts("Conv2dOutput_7", ["sid_8", "PaddedInput_7"], buffer_info_map_names)
    _check_max_workspace_size(buffer_pool_allocations, slow_memory_pool, slow_memory_size)
_check_max_workspace_size(buffer_pool_allocations, fast_memory_pool, fast_memory_size)
@tvm.script.ir_module
class ResnetStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
T_cast_1 = T.match_buffer(T_cast, [360000], dtype="int16")
for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
T_cast_5 = T.match_buffer(T_cast_4, [360000], dtype="int16")
PaddedInput_1 = T.decl_buffer([379456], "int16")
for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
            PaddedInput_1[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
Conv2dOutput_1 = T.decl_buffer([64], "int32")
for ff_1 in T.serial(0, 64):
Conv2dOutput_1[ff_1] = 0
for ry, rx, rc_1 in T.grid(3, 3, 64):
Conv2dOutput_1[ff_1] = Conv2dOutput_1[ff_1] + T.cast(PaddedInput_1[T.floordiv(ax0_ax1_fused_ax2_fused_1, 75) * 4928 + ry * 4928 + rx * 64 + T.floormod(ax0_ax1_fused_ax2_fused_1, 75) * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
for ax3_inner_2 in T.serial(0, 64):
T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", "tir.noalias": True})
placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")
placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")
placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")
T_add_1 = T.match_buffer(T_add, [1440000], dtype="int32")
PaddedInput_2 = T.decl_buffer([360000], "int16")
for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
PaddedInput_2[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
            Conv2dOutput_2 = T.decl_buffer([64], "int32")
for ax3_outer_1 in T.serial(0, 4):
for ff_2 in T.serial(0, 64):
Conv2dOutput_2[ff_2] = 0
for rc_2 in T.serial(0, 64):
Conv2dOutput_2[ff_2] = Conv2dOutput_2[ff_2] + T.cast(PaddedInput_2[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
for ax3_inner_3 in T.serial(0, 64):
T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", "tir.noalias": True})
placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")
placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")
placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")
placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")
T_cast_7 = T.match_buffer(T_cast_6, [1440000], dtype="uint8")
PaddedInput_3 = T.decl_buffer([360000], "int16")
for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
PaddedInput_3[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
Conv2dOutput_3 = T.decl_buffer([64], "int32") |
for ax3_outer_2 in T.serial(0, 4):
for ff_3 in T.serial(0, 64):
Conv2dOutput_3[ff_3] = 0
for rc_3 in T.serial(0, 64):
Conv2dOutput_3[ff_3] = Conv2dOutput_3[ff_3] + T.cast(PaddedInput_3[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
for ax3_inner_4 in T.serial(0, 64):
T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")
@T.prim_func
def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
T.attr("default", "device_id", 0)
T.attr("default", "device_type", 1)
sid_2 = T.allocate([720000], "int8", "global")
sid_6 = T.allocate([5760000], "int8", "global")
sid_7 = T.allocate([720000], "int8", "global")
sid_8 = T.allocate([720000], "int8", "global")
T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7, dtype="int32")) |
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6, output, dtype="int32"))
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")
placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")
placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")
T_cast_3 = T.match_buffer(T_cast_2, [360000], dtype="int16")
PaddedInput = T.decl_buffer([360000], "int16")
for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
PaddedInput[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
Conv2dOutput = T.decl_buffer([64], "int32")
for ff in T.serial(0, 64):
Conv2dOutput[ff] = 0
for rc in T.serial(0, 64):
Conv2dOutput[ff] = Conv2dOutput[ff] + T.cast(PaddedInput[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
for ax3_inner_1 in T.serial(0, 64):
                T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
__tvm_meta__ = None
@pytest.mark.parametrize(
["algorithm", "workspace_size"],
[("greedy_by_size", 7920256), ("greedy_by_conflicts", 7200256), ("hill_climb", 7200256)],
)
def test_resnet_subgraph(algorithm, workspace_size):
target = Target("c")
global_workspace_pool = WorkspacePoolInfo(
"global_workspace",
[target],
)
tir_mod = ResnetStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_workspace_pool])
main_func = tir_mod["tvmgen_default_run_model"]
buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
assert buffer_info_analysis.memory_pressure == 7200256
fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)
buffer_info_map_names = dict()
for buf_info in buffer_info_arr:
buffer_info_map_names[buf_info.name_hint] = buf_info
_verify_conflicts(
"sid_7",
[
"PaddedInput_1",
"sid_2",
"Conv2dOutput_1",
"PaddedInput_2",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput_3",
[
"PaddedInput_3",
"sid_6",
],
buffer_info_map_names,
)
_verify_conflicts(
"sid_6",
[
"Conv2dOutput_2",
"PaddedInput_2",
"sid_2",
"PaddedInput_3",
"Conv2dOutput_3",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput",
[
"sid_8",
"sid_2",
"PaddedInput",
],
buffer_info_map_names,
)
_verify_conflicts( |
"PaddedInput_3",
[
"sid_6",
"sid_2",
"Conv2dOutput_3",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput_2",
[
"PaddedInput_2",
"sid_2",
"sid_6",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput_1",
[
"sid_8",
"sid_2",
"sid_7",
"Conv2dOutput_1",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput_1",
[
"sid_7",
"PaddedInput_1",
"sid_2",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput",
[
"sid_2",
"sid_8",
"Conv2dOutput",
],
buffer_info_map_names,
)
_verify_conflicts(
"sid_8",
[
"PaddedInput",
"sid_2",
"Conv2dOutput",
"PaddedInput_1",
],
buffer_info_map_names,
)
_verify_conflicts(
"sid_2",
[
"PaddedInput",
"sid_8",
"Conv2dOutput",
"PaddedInput_1",
"sid_7",
"Conv2dOutput_1",
"PaddedInput_2",
"Conv2dOutput_2",
"sid_6",
"PaddedInput_3",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput_2",
[
"sid_7",
"sid_2",
"Conv2dOutput_2",
"sid_6",
],
buffer_info_map_names,
)
_check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
def test_custom_algo():
target = Target("c")
global_workspace_pool = WorkspacePoolInfo(
"global_workspace",
[target],
)
tir_mod = ResnetStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_workspace_pool])
tir_mod = tir_mod.with_attr("executor", tvm.relay.backend.Executor("aot"))
tir_mod = tir_mod.with_attr("runtime", tvm.relay.backend.Runtime("crt"))
tir_mod["__tvm_main__"] = tir_mod[
"tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast"
]
algo_called = False
@tvm.register_func("tir.usmp.algo.trivial")
def _trivial_algo(buf_infos, mem_pressure):
nonlocal algo_called
algo_called = True
out_layout = {}
offset = 0
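        # Naive layout: place every buffer back-to-back in its first candidate
        # pool, ignoring liveness conflicts; good enough to prove the custom
        # algorithm hook was actually invoked.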
for buf_info in buf_infos:
pool_info = buf_info.pool_candidates[0]
out_layout[buf_info] = usmp_utils.PoolAllocation(pool_info, offset)
offset += buf_info.size_bytes
return out_layout
usmp_pass = tvm.get_global_func("tir.transform.UnifiedStaticMemoryPlanner")
usmp_pass()(tir_mod)
assert not algo_called
with tvm.transform.PassContext(config={"tir.usmp.custom_algorithm": "trivial"}):
usmp_pass()(tir_mod)
assert algo_called
with pytest.raises(
tvm.TVMError, match="The selected custom USMP algorithm : invalid is not defined"
):
with tvm.transform.PassContext(config={"tir.usmp.custom_algorithm": "invalid"}):
            usmp_pass()(tir_mod)
import sys
import pytest
import random
import tvm
import tvm.testing
from tvm.tir.usmp.utils import BufferInfo
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size, tolerance=0):
"""Helper to check maximum allocated memory size"""
max_workspace_size = 0
for buffer_info, pool_allocation in buffer_pool_allocations.items():
if pool_allocation.pool_info == pool_info:
size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
if size_candidate > max_workspace_size:
max_workspace_size = size_candidate
_diff = max_workspace_size.value - size
return (
(max_workspace_size.value == size if tolerance == 0 else tolerance > 100 * _diff / size),
"'{}': expected {} got {}, diff {:0.2f}% ({} bytes)".format(
pool_info.pool_name, size, max_workspace_size, 100 * _diff / size, _diff
),
)
def _verify_conflicts(buffer_info, pool_allocation, buffer_info_map):
"""Helper to check expected liveness conflicts"""
for conflict in buffer_info.conflicts:
conflict_pool_allocation = buffer_info_map[conflict]
if conflict_pool_allocation.pool_info == pool_allocation.pool_info:
assert conflict_pool_allocation.byte_offset != pool_allocation.byte_offset
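            # Non-overlap check: if the two allocations are disjoint, the span
            # covering both (l2) must be at least the sum of their sizes.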
l2 = max(
conflict_pool_allocation.byte_offset + conflict.size_bytes,
pool_allocation.byte_offset + buffer_info.size_bytes,
) - min(conflict_pool_allocation.byte_offset, pool_allocation.byte_offset)
assert (
conflict.size_bytes + buffer_info.size_bytes <= l2
), 'Conflicting: \n"{} @{}"\n"{} @{}"'.format(
conflict, conflict_pool_allocation, buffer_info, pool_allocation
)
def _verify_all_conflicts(buffer_pool_allocations):
"""Helper to verify liveness conflicts"""
for buffer_info, pool_allocation in buffer_pool_allocations.items():
_verify_conflicts(buffer_info, pool_allocation, buffer_pool_allocations)
def test_bounded(
random_len=150,
    pools=[
WorkspacePoolInfo("default", [], PoolInfoProperties(65535)),
WorkspacePoolInfo("slow", []),
],
):
"""Tests two pools, one is bounded and one is not limited"""
random.seed(0)
mem_range = [BufferInfo(str(i), random.randrange(1, 65535), pools) for i in range(random_len)]
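    # Wire up random symmetric conflicts so that every buffer conflicts with
    # at least one other buffer in the set.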
for mr in mem_range:
pr = random.choice(mem_range)
while pr in (*mr.conflicts, mr):
pr = random.choice(mem_range)
mr.set_conflicts([*mr.conflicts, pr])
pr.set_conflicts([*pr.conflicts, mr])
fusmp_algo = tvm.get_global_func("tir.usmp.algo.hill_climb")
result_map = fusmp_algo(mem_range, 0)
_verify_all_conflicts(result_map)
def __test_data_alloc_max():
"""Test data"""
intervals = [
(0, 159, 2048),
(0, 13, 7904),
(4, 35, 16),
(12, 17, 32768),
(16, 21, 32768),
]
return intervals
def __test_data_deep_speech():
"""Test data"""
intervals = [
(0, 159, 2048),
(0, 151, 2048),
(0, 13, 7904),
(2, 49, 16),
(4, 35, 16),
(6, 21, 16),
(12, 17, 32768),
(16, 21, 32768),
(20, 27, 32768),
(26, 31, 32768),
(30, 35, 32768),
(34, 41, 32768),
(40, 45, 32768),
(44, 49, 32768),
(48, 145, 32768),
(54, 59, 2048),
(58, 483, 4096),
(60, 65, 2048),
(64, 461, 4096),
(66, 71, 2048),
(70, 439, 4096),
(72, 77, 2048),
(76, 417, 4096),
(78, 83, 2048),
(82, 395, 4096),
(84, 89, 2048),
(88, 373, 4096),
(90, 95, 2048),
(94, 351, 4096),
(96, 101, 2048),
(100, 329, 4096),
(102, 107, 2048),
(106, 307, 4096),
(108, 113, 2048),
(112, 285, 4096),
(114, 119, 2048),
(118, 263, 4096),
(120, 125, 2048),
(124, 241, 4096),
(126, 131, 2048),
(130, 219, 4096),
(132, 137, 2048),
(136, 197, 4096),
        (138, 143, 2048),
(142, 175, 4096),
(144, 149, 2048),
(148, 153, 4096),
(152, 163, 8192),
(154, 171, 2048),
(156, 181, 2048),
(160, 167, 2048),
(162, 165, 2048),
(168, 171, 2048),
(170, 509, 2048),
(174, 185, 8192),
(176, 193, 2048),
(178, 203, 2048),
(182, 189, 2048),
(184, 187, 2048),
(190, 193, 2048),
(192, 511, 2048),
(196, 207, 8192),
(198, 215, 2048),
(200, 225, 2048),
(204, 211, 2048),
(206, 209, 2048),
(212, 215, 2048),
(214, 513, 2048),
(218, 229, 8192),
(220, 237, 2048),
(222, 247, 2048),
(226, 233, 2048),
(228, 231, 2048),
(234, 237, 2048),
(236, 515, 2048),
(240, 251, 8192),
(242, 259, 2048),
(244, 269, 2048),
(248, 255, 2048),
(250, 253, 2048),
(256, 259, 2048),
(258, 517, 2048),
(262, 273, 8192),
(264, 281, 2048),
(266, 291, 2048),
(270, 277, 2048),
(272, 275, 2048),
(278, 281, 2048),
(280, 519, 2048),
(284, 295, 8192),
(286, 303, 2048),
(288, 313, 2048),
(292, 299, 2048),
(294, 297, 2048),
(300, 303, 2048),
(302, 521, 2048),
(306, 317, 8192),
(308, 325, 2048),
(310, 335, 2048),
(314, 321, 2048),
(316, 319, 2048),
(322, 325, 2048),
(324, 523, 2048),
(328, 339, 8192),
(330, 347, 2048),
(332, 357, 2048),
(336, 343, 2048),
(338, 341, 2048),
(344, 347, 2048),
(346, 525, 2048),
(350, 361, 8192),
(352, 369, 2048),
(354, 379, 2048),
(358, 365, 2048),
(360, 363, 2048),
(366, 369, 2048),
(368, 527, 2048),
(372, 383, 8192),
(374, 391, 2048),
(376, 401, 2048),
(380, 387, 2048),
        (382, 385, 2048),
(388, 391, 2048),
(390, 529, 2048),
(394, 405, 8192),
(396, 413, 2048),
(398, 423, 2048),
(402, 409, 2048),
(404, 407, 2048),
(410, 413, 2048),
(412, 531, 2048),
(416, 427, 8192),
(418, 435, 2048),
(420, 445, 2048),
(424, 431, 2048),
(426, 429, 2048),
(432, 435, 2048),
(434, 533, 2048),
(438, 449, 8192),
(440, 457, 2048),
(442, 467, 2048),
(446, 453, 2048),
(448, 451, 2048),
(454, 457, 2048),
(456, 535, 2048),
(460, 471, 8192),
(462, 479, 2048),
(464, 489, 2048),
(468, 475, 2048),
(470, 473, 2048),
(476, 479, 2048),
(478, 537, 2048),
(482, 493, 8192),
(484, 501, 2048),
(486, 497, 2048),
(490, 497, 2048),
(492, 495, 2048),
(496, 626, 2048),
(498, 501, 2048),
(500, 626, 2048),
(504, 549, 16),
(508, 543, 32768),
(542, 549, 32768),
(548, 555, 32768),
(554, 563, 464),
(560, 563, 256),
(562, 617, 2048),
(564, 567, 1856),
(566, 573, 1024),
(568, 619, 1024),
(570, 573, 1024),
(572, 577, 1024),
(576, 579, 1024),
(578, 605, 1024),
(580, 593, 1024),
(584, 587, 1024),
(586, 603, 1024),
(594, 597, 1024),
(596, 613, 1024),
(604, 607, 1024),
(606, 617, 1024),
(616, 621, 2048),
(618, 621, 1024),
(620, 626, 464),
]
return intervals
def __test_data_five():
"""Test data"""
return [
(4, 5, 95),
(1, 4, 52135),
(3, 4, 12136),
(3, 5, 62099),
(4, 5, 50458),
]
def __test_data_simple():
"""Test data"""
return [
(0, 23, 131072),
(4, 5, 65568),
(4, 9, 8192),
(8, 30, 15360),
(10, 11, 65568),
(10, 15, 4096),
        (16, 17, 65552),
(16, 21, 2048),
(22, 23, 32784),
(22, 27, 1024),
]
def find_maximum_from_intervals(intervals):
"""Expected list of intervals of (start, end, size)"""
sorted_list = sorted(intervals, key=lambda _: _[0])
max_mem = 0
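    # Brute-force sweep: at every time step, sum the sizes of all intervals
    # live at that step; the peak is the analytic lower bound for any planner.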
for t in range(sorted_list[0][0], sorted_list[-1][1] + 1):
max_mem = max(
max_mem, sum([size for (start, end, size) in sorted_list if t >= start and t <= end])
)
return max_mem
@pytest.mark.parametrize(
"intervals",
[__test_data_alloc_max(), __test_data_simple(), __test_data_deep_speech(), __test_data_five()],
)
def test_intervals(intervals):
"""Tests supplied intervals"""
random.seed(0)
result = run_intervals(intervals, 5)
assert result["tir.usmp.algo.hill_climb"] == True, f" {result}"
def generate_range(sz, max_segment_sz=65535):
"""Helper func to generate list of size sz of ranges of random size max_segment_sz"""
for i in range(0, sz):
start = random.randrange(i, sz)
        stop = random.randrange(start + 1, start + 2 + ((sz - start) // 2))
assert stop - start > 0
yield (start, stop, random.randrange(1, max_segment_sz))
def test_random_intervals(interval_len=16):
"""Tests randomly generated interval of length interval_len"""
random.seed(0)
intervals = list(generate_range(interval_len))
return run_intervals(intervals)
def run_intervals(intervals, tolerance=0):
"""Helper to run intervals"""
expected_mem = find_maximum_from_intervals(intervals)
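    # expected_mem is the analytic optimum; each algorithm below is judged
    # against it, optionally within a percentage tolerance.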
pools = [WorkspacePoolInfo("default", [])]
buffers = []
for i, (start, stop, size) in enumerate(intervals):
buf = BufferInfo(str(i), size, pools)
buffers.append(buf)
for i, (i_start, i_stop, _) in enumerate(intervals):
conflicts = set()
for j, (j_start, j_stop, _) in enumerate(intervals):
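            # Intervals i and j overlap iff the span covering both is shorter
            # than the sum of their individual durations.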
start = min(i_start, j_start)
stop = max(i_stop, j_stop)
i_dur = i_stop - i_start + 1
            j_dur = j_stop - j_start + 1
if i != j and (stop - start + 1 < i_dur + j_dur):
conflicts.add(buffers[j])
buffers[i].set_conflicts([c for c in sorted(conflicts, key=lambda c: c.name_hint)])
result = {}
for (alg, params) in [
("tir.usmp.algo.hill_climb", (expected_mem,)),
("tir.usmp.algo.greedy_by_size", (expected_mem,)),
]:
fusmp_algo = tvm.get_global_func(alg)
print("\n", "started", alg)
buffer_info_arr = fusmp_algo(buffers, *params)
print()
_verify_all_conflicts(buffer_info_arr)
result[alg], msg = _check_max_workspace_size(
buffer_info_arr, pools[0], expected_mem, tolerance
)
if not result[alg]:
print(alg, msg)
return result
if __name__ == "__main__":
    tvm.testing.main()
import pytest
import sys
import tvm
from tvm import tir, script
from tvm.ir