]
]
),
np.array([4]),
]
verify_all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected,
output_format="tensorflow",
)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 0.9, 1.2],
]
]
).astype(np.float32)
scores = np.array([[[0.2, 0.3], [0.3, 0.2]]]).astype(np.float32)
iou_threshold = 0.3
score_threshold = 0.15
expected = np.array([[0, 0, 1], [0, 1, 0]])
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
]
]
).astype(np.float32)
scores = np.array([[[0.2]]]).astype(np.float32)
score_threshold = 0.4
expected = np.zeros((0, 3))
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
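# gather_nd with dynamic shapes: relay.Any() dimensions are only bound at runtime, so results are checked against a numpy reference implementation.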
@tvm.testing.uses_gpu
def test_gather_nd():
def verify_gather_nd(data_shape, indices_shape, data_shape_np, indices_shape_np, batch_dims=0):
x = relay.var("x", relay.TensorType(data_shape, "float32"))
y = relay.var("y", relay.TensorType(indices_shape, "int32"))
z = relay.gather_nd(x, y, batch_dims=batch_dims, index_rank=indices_shape[0])
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
data_np = np.random.uniform(size=data_shape_np).astype("float32")
indices_np = np.random.randint(low=0, high=2, size=indices_shape_np, dtype="int32")
ref_res = ref_funcs.gather_nd(data_np, indices_np, batch_dims)
check_result([data_np, indices_np], mod, [ref_res])
verify_gather_nd((2, 2), (2, relay.Any()), (2, 2), (2, 3))
verify_gather_nd((relay.Any(), 2), (2, relay.Any()), (2, 2), (2, 3))
verify_gather_nd((relay.Any(), 2), (1, relay.Any()), (10, 2), (1, 10), 1)
verify_gather_nd(
(relay.Any(), 2, 2, 3, 4), (3, relay.Any(), relay.Any()), (3, 2, 2, 3, 4), (3, 3, 2), 2
)
@tvm.testing.uses_gpu
def test_scatter_nd():
def verify_scatter_nd(data_np, indices_np, updates_np, ref_res):
indices_shape = (2, relay.Any())
updates_shape = (relay.Any(),)
data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
indices = relay.var("indices", relay.TensorType(indices_shape, str(indices_np.dtype)))
updates = relay.var("updates", relay.TensorType(updates_shape, str(updates_np.dtype)))
out = relay.op.scatter_nd(data, indices, updates, "add")
mod = tvm.IRModule()
mod["main"] = relay.Function([data, indices, updates], out)
check_result([data_np, indices_np, updates_np], mod, [ref_res])
data = np.zeros((2, 2)).astype("int64")
indices = np.array([[1, 1, 0], [0, 1, 0]])
updates = np.array([2, 3, 0])
out = np.array([[0, 0], [2, 3]])
verify_scatter_nd(data, indices, updates, out)
@tvm.testing.uses_gpu
def test_gather():
def verify_gather(data_shape, indices_shape, data_shape_np, indices_shape_np, axis):
x = relay.var("x", relay.TensorType(data_shape, "float32"))
y = relay.var("y", relay.TensorType(indices_shape, "int32"))
z = relay.gather(x, axis, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
data_np = np.random.uniform(size=data_shape_np).astype("float32")
indices_np = np.random.randint(low=0, high=2, size=indices_shape_np, dtype="int32")
ref_res = tvm.topi.testing.gather_python(data_np, axis, indices_np)
check_result([data_np, indices_np], mod, [ref_res])
verify_gather((relay.Any(),), (relay.Any(),), (10,), (10,), 0)
verify_gather((2, 2), (2, relay.Any()), (2, 2), (2, 3), 1)
verify_gather((relay.Any(), 2), (2, relay.Any()), (2, 2), (2, 3), 1)
verify_gather((relay.Any(), relay.Any()), (relay.Any(), relay.Any()), (2, 3), (1, 3), 0)
@tvm.testing.uses_gpu
def test_searchsorted():
def verify_searchsorted(
sorted_sequence_shape, values_shape, sorted_sequence_shape_np, values_shape_np
):
x = relay.var("x", relay.TensorType(sorted_sequence_shape, "float32"))
y = relay.var("y", relay.TensorType(values_shape, "float32"))
z = relay.searchsorted(x, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.sort(np.random.uniform(size=sorted_sequence_shape_np).astype("float32"), axis=-1)
y_np = np.random.uniform(size=values_shape_np).astype("float32")
ref_res = searchsorted_ref(x_np, y_np, False, "int32")
check_result([x_np, y_np], mod, [ref_res])
for shape_np, values_shape_np in zip([(8, 9, 10), (10,), (11,)], [(8, 9, 20), (5,), (8, 9, 7)]):
sorted_sequence_shape = (relay.Any(),) * len(shape_np)
values_shape = (relay.Any(),) * len(values_shape_np)
verify_searchsorted(
sorted_sequence_shape,
values_shape,
shape_np,
values_shape_np,
)
if __name__ == "__main__":
tvm.testing.main()
"""Test layout rewrite support for whole neural networks"""
import sys
import tempfile
import pytest
import numpy as np
import tvm
from tvm import relay, auto_scheduler
from tvm.contrib import graph_executor
import tvm.testing
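# Helper: draw a random numpy array whose shape matches the shape annotation of a relay variable.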
def get_np_array(var, dtype):
return np.random.randn(*[int(x) for x in var.type_annotation.shape]).astype(dtype)
def get_relay_conv2d(
outc=32,
inc=32,
height=14,
width=14,
kh=3,
kw=3,
batch=1,
pad=0,
stride=1,
dilation=1,
layout="NHWC",
):
dtype = "float32"
if layout == "NHWC":
kernel_layout = "HWIO"
d = relay.var("data", shape=(batch, height, width, inc), dtype=dtype)
w = relay.var("weight", shape=(kh, kw, inc, outc), dtype=dtype)
elif layout == "NCHW":
kernel_layout = "OIHW"
d = relay.var("data", shape=(batch, inc, height, width), dtype=dtype)
w = relay.var("weight", shape=(outc, inc, kh, kw), dtype=dtype)
y = relay.nn.conv2d(
d,
w,
padding=pad,
kernel_size=(kh, kw),
strides=(stride, stride),
dilation=(dilation, dilation),
channels=outc,
groups=1,
data_layout=layout,
kernel_layout=kernel_layout,
)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def get_relay_conv3d(
outc=8,
inc=8,
depth=8,
height=7,
width=7,
kd=1,
kh=1,
kw=1,
batch=1,
pad=0,
stride=1,
dilation=1,
layout="NDHWC",
):
dtype = "float32"
if layout == "NDHWC":
kernel_layout = "DHWIO"
d = relay.var("data", shape=(batch, depth, height, width, inc), dtype=dtype)
w = relay.var("weight", shape=(kd, kh, kw, inc, outc), dtype=dtype)
elif layout == "NCDHW":
kernel_layout = "OIDHW"
d = relay.var("data", shape=(batch, inc, depth, height, width), dtype=dtype)
w = relay.var("weight", shape=(outc, inc, kd, kh, kw), dtype=dtype)
y = relay.nn.conv3d(
d,
w,
padding=pad,
kernel_size=(kd, kh, kw),
strides=(stride, stride, stride),
dilation=(dilation, dilation, dilation),
channels=outc,
groups=1,
data_layout=layout,
kernel_layout=kernel_layout,
)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def get_relay_dense(m=128, n=128, k=128):
dtype = "float32"
d = relay.var("data", shape=(m, k), dtype=dtype)
w = relay.var("weight", shape=(n, k), dtype=dtype)
y = relay.nn.dense(d, w)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def get_relay_batchmm(batch=4, m=128, n=128, k=128):
dtype = "float32"
d = relay.var("data", shape=(batch, m, k), dtype=dtype)
w = relay.var("weight", shape=(batch, n, k), dtype=dtype)
y = relay.nn.batch_matmul(d, w)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
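# Extract auto-scheduler tasks from the module, run a single measurement trial with the random search policy, then build once with the tuned records (opt_level=3, auto-scheduler enabled) and once untuned (opt_level=0) and check that both builds produce the same output.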
def tune_and_check(mod, data, weight, target, dev):
tasks, task_weights = auto_scheduler.extract_tasks(
mod, target=target, params={"weight": weight}
)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=1,
num_measures_per_round=1,
builder=auto_scheduler.LocalBuilder(timeout=60),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option, search_policy="sketch.random")
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
):
lib = relay.build(mod, target=target, params={"weight": weight})
with tvm.transform.PassContext(opt_level=0):
lib2 = relay.build(mod, target=target, params={"weight": weight})
def get_output(data, lib):
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
return module.get_output(0).numpy()
actual_output = get_output(data, lib)
expected_output = get_output(data, lib2)
tvm.testing.assert_allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d(target, dev):
mod, data, weight = get_relay_conv2d(kh=1, kw=1)
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d_winograd(target, dev):
mod, data, weight = get_relay_conv2d(outc=128, kh=3, kw=3)
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv3d(target, dev):
mod, data, weight = get_relay_conv3d()
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_dense(target, dev):
mod, data, weight = get_relay_dense()
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_batch_matmul(target, dev):
mod, data, weight = get_relay_batchmm()
tune_and_check(mod, data, weight, target, dev)
if __name__ == "__main__":
tvm.testing.main()
"""Test task extraction for auto-scheduler"""
import json
import tempfile
import pytest
import tvm.relay.testing
import tvm.testing
from tvm import _ffi as _ffi_api
from tvm import auto_scheduler, relay
def get_network(name, batch_size=1, layout="NHWC"):
"""Get the symbol definition and random weight of a network"""
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
elif layout == "NCDHW":
image_shape = (3, 16, 224, 224)
elif layout == "NDHWC":
image_shape = (3, 224, 224, 16)
else:
raise ValueError("Invalid layout: " + layout)
if name == "resnet-18":
mod, params = relay.testing.resnet.get_workload(
num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "resnet-50":
mod, params = relay.testing.resnet.get_workload(
num_layers=50, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "winograd-test":
input_shape = [1, 23, 40, 32]
data = relay.var("data", shape=input_shape, dtype="float32")
net = relay.testing.layers.conv2d(
data=data,
channels=128,
kernel_size=3,
strides=1,
padding=1,
data_layout="NHWC",
kernel_layout="HWIO",
name="",
)
bias = relay.var("conv1_bias")
net = relay.nn.bias_add(net, bias, 3)
net = relay.nn.relu(net)
mod, params = relay.testing.create_workload(net)
elif name == "resnet3d-18":
mod, params = relay.testing.resnet_3d.get_workload(
num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "resnet3d-18":
mod, params = relay.testing.resnet_3d.get_workload(
num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "dcgan":
mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size, layout=layout)
elif name == "mlp":
data = relay.var("data", shape=(batch_size, 32))
fc1 = relay.nn.dense(data, relay.var("fc1_weight"), units=32)
fc1 = relay.nn.bias_add(fc1, relay.var("fc1_bias"), axis=-1)
act1 = relay.nn.relu(fc1)
fc2 = relay.nn.dense(act1, relay.var("fc2_weight"), units=32)
fc2 = relay.nn.bias_add(fc2, relay.var("fc2_bias"), axis=-1)
act2 = relay.nn.relu(fc2)
mlp = act2
args = relay.analysis.free_vars(act2)
mlp = relay.Function(args, mlp)
mod, params = relay.testing.init.create_workload(mlp)
else:
raise ValueError("Unsupported network: " + name)
return mod, params
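# Each tuple below is (network, layout, expected number of extracted tasks, expected sum of task weights).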
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"params",
[
("mlp", "NHWC", 1, 2),
("resnet-18", "NHWC", 24, 25),
("resnet-18", "NCHW", 24, 25),
("mobilenet", "NHWC", 22, 30),
("mobilenet", "NCHW", 22, 30),
("resnet3d-18", "NCDHW", 23, 24),
("resnet3d-18", "NDHWC", 23, 24),
],
)
def test_task_extraction_cuda(params):
target = tvm.target.Target("cuda")
network, layout, expected_task, expected_weights = params
mod, params = get_network(network, layout=layout)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for task, weight in zip(tasks, task_weights):
print(task.desc, task.workload_key, weight)
assert len(tasks) == expected_task
assert sum(task_weights) == expected_weights
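# Each tuple below is (function name, expected number of extracted tasks, include_simple_tasks flag).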
@pytest.mark.parametrize(
"params",
[
("basic_func", 2, False),
("fused_func", 1, False),
("simple_func", 0, False),
("simple_func", 1, True),
("shape_of_func", 0, False),
("shape_of_func", 1, True),
("dyn_shape_func", 0, False),
("control_flow_func", 1, False),
("func_w_unsupported_op", 1, True),
],
)
def test_task_extraction_cpu(params):
ishape = (1, 3, 224, 224)
w1shape = (32, 3, 3, 3)
w2shape = (32, 32, 3, 3)
dtype = "float32"
target = tvm.target.Target("llvm")
def get_func():
data = relay.var("data", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight2", shape=(w2shape), dtype=dtype)
conv2d = relay.nn.conv2d(data, weight1, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
conv2d = relay.nn.conv2d(relu, weight2, kernel_size=(3, 3), padding=(1, 1))
out = relay.nn.relu(conv2d)
return relay.Function([data, weight1, weight2], out)
def get_fused_func():
data = relay.var("data", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight2", shape=(w2shape), dtype=dtype)
fused_func = get_func()
fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
call = relay.Call(fused_func, [data, weight1, weight2])
return relay.Function([data, weight1, weight2], call)
def get_simple_func():
data = relay.var("data", relay.TensorType((1, 2, 3), "float32"))
out = relay.image.affine_grid(data, (150, 150))
return relay.Function([data], out)
def get_shape_of_func():
data = relay.var("data", shape=(relay.Any(), 28, 28), dtype="float32")
out = relay.shape_of(data)
return relay.Function([data], out)
def get_func_with_dynamic_shape():
data = relay.var("data", shape=(relay.Any(), 32), dtype="float32")
out = relay.max(data)
return relay.Function(relay.analysis.free_vars(out), out)
def get_func_with_control_flow():
data = relay.var("data", shape=(1, 3, 224, 224))
weight = relay.var("weight", shape=(3, 3, 3, 3))
eq1 = relay.var("e1", shape=[], dtype="float32")
eq2 = relay.var("e2", shape=[], dtype="float32")
eq = relay.equal(eq1, eq2)
true_branch = relay.zeros(shape=(1, 3, 224, 224), dtype="float32")
false_branch = relay.nn.conv2d(data, weight, kernel_size=(3, 3), channels=3, padding=(1, 1))
false_branch = relay.nn.conv2d(
false_branch, weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
)
ife = relay.If(eq, true_branch, false_branch)
out = relay.erf(ife)
return relay.Function([data, weight, eq1, eq2], out)
def get_func_with_unsupported_op():
def get_postproc_func():
data = relay.var("data", shape=((1, 3, 6)), dtype=dtype)
out = relay.nn.relu(data)
func = relay.Function([data], out)
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
return func
cls_prob = relay.var("cls_prob", relay.ty.TensorType((1, 3, 3), "float32"))
loc_pred = relay.var("loc_pred", relay.ty.TensorType((1, 3 * 4), "float32"))
anchors = relay.var("anchors", relay.ty.TensorType((1, 3, 4), "float32"))
mtl = relay.vision.multibox_transform_loc(
cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors
)
nms = relay.vision.non_max_suppression(mtl[0], mtl[1], mtl[0], return_indices=False)
out = relay.Call(get_postproc_func(), [nms])
return relay.Function([cls_prob, loc_pred, anchors], out)
func_map = {
"basic_func": get_func,
"fused_func": get_fused_func,
"simple_func": get_simple_func,
"shape_of_func": get_shape_of_func,
"dyn_shape_func": get_func_with_dynamic_shape,
"control_flow_func": get_func_with_control_flow,
"func_w_unsupported_op": get_func_with_unsupported_op,
}
def verify_task_extraction(func_name, expected_task, include_simple_tasks=False):
func = func_map[func_name]()
mod = tvm.IRModule.from_expr(func)
tasks, task_weights = auto_scheduler.extract_tasks(
mod["main"], None, target, include_simple_tasks=includ |
e_simple_tasks
)
assert len(tasks) == expected_task
assert len(task_weights) == expected_task
verify_task_extraction(*params)
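# The DAG log dumped during extraction should contain exactly the compute DAGs of the extracted tasks.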
def test_dump_workload_to_dag_extract_tasks():
mod, _ = get_network("mobilenet", layout="NHWC")
with tempfile.NamedTemporaryFile() as f:
tasks, _ = auto_scheduler.extract_tasks(
mod["main"], None, "llvm", include_simple_tasks=True, dump_workload_to_dag_log=f.name
)
expected = {task.workload_key: str(task.compute_dag) for task in tasks}
actual = json.load(f)
assert expected == actual
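# Replace the default workload hash with a simple counter so every extracted task receives a unique, consecutive hash value.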
def test_custom_hash_func_extract_tasks():
@_ffi_api.register_func("auto_scheduler.compute_dag.hash_func")
def counting_unique_hash(str_dag):
ret = counting_unique_hash.i
counting_unique_hash.i += 1
return ret
counting_unique_hash.i = 0
mod, _ = get_network("mobilenet", layout="NHWC")
tasks, _ = auto_scheduler.extract_tasks(mod["main"], None, "llvm", include_simple_tasks=True)
hash_values = []
for task in tasks:
hash_value = int(task.workload_key[1:].split(",")[0])
hash_values.append(hash_value)
assert len(hash_values) == len(set(hash_values))
assert min(hash_values) == 0
assert max(hash_values) == counting_unique_hash.i - 1
if __name__ == "__main__":
pytest.main([__file__])
"""Test end-to-end network tuning with auto-scheduler"""
import tempfile
import numpy as np
from tvm import auto_scheduler, relay
from tvm.contrib import graph_executor
import tvm.testing
from test_auto_scheduler_task_extraction import get_network
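# Tune the network briefly with random search on a local RPC runner, build with the tuned records (and with the sample-based fallback), and compare both against an untuned opt_level=0 build.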
def tune_network(network, target):
mod, params = get_network(network)
target = tvm.target.Target(target)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
measure_ctx = auto_scheduler.LocalRPCMeasureContext(timeout=60, device=0)
tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=100,
num_measures_per_round=2,
early_stopping=1,
runner=measure_ctx.runner,
builder=auto_scheduler.LocalBuilder(timeout=60),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option, search_policy="sketch.random")
del measure_ctx
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
lib = relay.build(mod, target=target, params=params)
with auto_scheduler.ApplyHistoryBest([log_file, log_file]) as best:
assert isinstance(
best, auto_scheduler.dispatcher.ApplyHistoryBest
), "Unable to load multiple log files jointly."
loaded_recs = auto_scheduler.dispatcher.load_records(log_file)
with auto_scheduler.ApplyHistoryBest(iter(loaded_recs)) as best:
assert isinstance(
best, auto_scheduler.dispatcher.ApplyHistoryBest
), "Unable to ingest logs from an interator."
with auto_scheduler.ApplyHistoryBestOrSample(None, num_measure=2):
with tvm.transform.PassContext(
opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
lib2 = relay.build(mod, target=target, params=params)
with tvm.transform.PassContext(opt_level=0):
ref_lib = relay.build(mod, target=target, params=params)
def get_output(data, lib):
dev = tvm.cuda()
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
return module.get_output(0).numpy()
np.random.seed(0)
if network == "mlp":
data = np.random.uniform(size=(1, 32))
elif network == "winograd-test":
data = np.random.uniform(size=(1, 23, 40, 32))
else:
raise ValueError("Unknown network: " + network)
actual_output1 = get_output(data, lib)
actual_output2 = get_output(data, lib2)
expected_output = get_output(data, ref_lib)
tvm.testing.assert_allclose(actual_output1, expected_output, rtol=1e-4, atol=1e-4)
tvm.testing.assert_allclose(actual_output2, expected_output, rtol=1e-4, atol=1e-4)
@tvm.testing.requires_cuda
def test_tuning_cuda():
tune_network("mlp", "cuda")
tune_network("winograd-test", "cuda")
if __name__ == "__main__":
test_tuning_cuda()
"""Test task extraction for autotvm"""
import tvm.relay.testing
from tvm import relay
from tvm import autotvm
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
if name == "resnet-18":
mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=batch_size)
elif name == "resnet3d-18":
mod, params = relay.testing.resnet_3d.get_workload(num_layers=18, batch_size=batch_size)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == "dcgan":
mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size)
input_shape = (batch_size, 100)
else:
raise ValueError("Unsupported network: " + name)
return mod, params, input_shape
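# The numbers asserted below are the expected counts of tunable conv2d/conv3d/dense/conv2d_transpose tasks in each network.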
def test_task_extraction():
target = "llvm"
mod_list = []
params_list = []
conv2d = relay.op.get("nn.conv2d")
conv3d = relay.op.get("nn.conv3d")
conv2d_transpose = relay.op.get("nn.conv2d_transpose")
dense = relay.op.get("nn.dense")
mod, params, _ = get_network("resnet-18", batch_size=1)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(conv2d,)
)
assert len(tasks) == 12
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(conv2d,))
assert len(tasks) == 12
mod, params, _ = get_network("resnet-18", batch_size=1)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(dense,)
)
assert len(tasks) == 2
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
assert len(tasks) == 2
mod, params, _ = get_network("resnet-18", batch_size=1)
mod_list.append(mod)
params_list.append(params)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(conv2d, dense)
)
assert len(tasks) == 14
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(conv2d, dense)
)
assert len(tasks) == 14
tasks = autotvm.task.extract_from_program(mod, target=target, params=params)
assert len(tasks) == 14
mod, params, _ = get_network("resnet3d-18", batch_size=1)
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(conv3d,))
assert len(tasks) == 12
mod, params, _ = get_network("mobilenet", batch_size=1)
mod_list.append(mod)
params_list.append(params)
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(conv2d, dense)
)
assert len(tasks) == 21
mod, params, _ = get_network("dcgan", batch_size=1)
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(conv2d_transpose,)
)
assert len(tasks) == 4
tasks = autotvm.task.extract_from_multiple_program(
mod_list, params_list, target=target, ops=(conv2d,)
)
assert len(tasks) == 31
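# On CUDA, fp32 dense should dispatch to dense_small_batch.gpu while int8 dense should dispatch to dense_int8.cuda.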
def test_task_extraction_for_dense_int8_cuda():
target = "cuda"
dense = relay.op.get("nn.dense")
def get_net(batch, in_dim, out_dim, dtype, out_dtype):
data = tvm.relay.var("data", shape=[batch, in_dim], dtype=dtype)
weight = tvm.relay.var("weight", shape=[out_dim, in_dim], dtype=dtype)
out = relay.nn.dense(data, weight, out_dtype=out_dtype)
mod, params = relay.testing.create_workload(out)
return mod, params
mod, params = get_net(1, 16, 32, "float32", "float32")
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
assert len(tasks) == 1 and tasks[0].name == "dense_small_batch.gpu"
mod, params = get_net(1, 16, 32, "int8", "int32")
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
assert len(tasks) == 1 and tasks[0].name == "dense_int8.cuda"
if __name__ == "__main__":
test_task_extraction()
test_task_extraction_for_dense_int8_cuda()
import numpy as np
import pytest
from unittest.mock import patch
import tvm
import json
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.op import add
import tvm.testing
from tvm.relay.testing import mlp
from tvm import rpc
from tvm.contrib import utils
def check_rts(expr, args, expected_result, mod=None):
"""
Check that evaluating `expr` applied to `args` produces
`expected_result` on both the debug evaluator and the graph runtime.
Parameters
----------
expr:
The expression to evaluate
args: list of Expr
The arguments to supply the expr.
expected_result:
The expected result of running the expression.
"""
eval_result = relay.create_executor("debug", mod=mod).evaluate(expr)(*args)
rts_result = relay.create_executor("graph", mod=mod).evaluate(expr)(*args)
tvm.testing.assert_allclose(eval_result.numpy(), rts_result.numpy())
tvm.testing.assert_allclose(eval_result.numpy(), expected_result)
def test_add_op_scalar():
"""
test_add_op_scalar:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=())
y = relay.var("y", shape=())
func = relay.Function([x, y], add(x, y))
x_y_data = [
(np.array(10.0, dtype="float32"), np.array(1.0, dtype="float32")),
(np.float32(10.0), np.float32(1.0)),
(10.0, 1.0),
]
for (x_data, y_data) in x_y_data:
check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_scalar_int():
"""
test_add_op_scalar_int:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=(), dtype="int32")
y = relay.var("y", shape=(), dtype="int32")
func = relay.Function([x, y], add(x, y))
x_y_data = [
(np.array(10.0, dtype="int32"), np.array(1.0, dtype="int32")),
(np.int32(10), np.int32(1)),
(10, 1),
]
for (x_data, y_data) in x_y_data:
check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_tensor():
"""
Program:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(10, 5))
func = relay.Function([x, y], add(x, y))
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(10, 5).astype("float32")
check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_broadcast():
"""
Program:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
func = relay.Function([x, y], add(x, y))
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
check_rts(func, [x_data, y_data], x_data + y_data)
def test_with_params():
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm", params=params)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input(**params)
mod.set_input(x=x_data)
mod.run()
res = mod.get_output(0).numpy()
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)
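# Memory planning should reuse buffers: the chain of exp ops needs only four distinct storage ids (two 40-byte and two 4-byte buffers) on a single device type.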
def test_plan_memory():
x = relay.var("x", shape=(10,))
y = relay.var("x", shape=(1,))
y2 = relay.exp(y)
z = relay.add(x, y2)
z = relay.exp(z)
z = relay.exp(z)
z = relay.exp(z)
z = relay.exp(z)
z = relay.exp(z)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.transform.FuseOps(0)(mod)
func = mod["main"]
mod = relay.transform.InferType()(mod)
memory_plan = relay.backend._backend.GraphPlanMemory(func)
storage_ids = set()
device_types = set()
storage_sizes = {}
for k, v in memory_plan.expr_to_storage_info.items():
for x in v.storage_ids:
storage_ids.add(x)
storage_sizes[x] = v.storage_sizes
for x in v.device_types:
device_types.add(x)
assert len(storage_ids) == 4, f"found storage_ids: {storage_ids}"
assert len(device_types) == 1
assert len(storage_sizes) == 4
assert (
storage_sizes[0][0] == 40
and storage_sizes[1][0] == 4
and storage_sizes[2][0] == 4
and storage_sizes[3][0] == 40
)
def test_plan_2d_memory():
"""Verification if GraphPlanMemory manages 2d memory reffered as
global.texture* memory scopes in json file."""
global_virtual_device = tvm.target.VirtualDevice(memory_scope="global")
texture_virtual_device = tvm.target.VirtualDevice(memory_scope="global.texture")
metatable = {
"VirtualDevice": [
global_virtual_device,
texture_virtual_device,
]
}
mod = tvm.parser.parse(
"""
def @main(%data1: Tensor[(1, 32, 40, 40), float32],
%data2: Tensor[(1, 32, 40, 40), float32]) {
%0 = fn (%a, Primitive=1) {
layout_transform(%a, src_layout="NCHW", dst_layout="NCHW4c")
};
%1 = %0(%data1);
%3 = %0(%data2);
%5 = fn (%a {virtual_device=meta[VirtualDevice][0]},
%b {virtual_device=meta[VirtualDevice][0]},
virtual_device=meta[VirtualDevice][1],
Primitive=1) {
add(%a, %b)
};
%6 = %5(%1, %3);
%7 = fn (%a {virtual_device=meta[VirtualDevice][1]},
%b {virtual_device=meta[VirtualDevice][0]},
virtual_device=meta[VirtualDevice][1],
Primitive=1) {
add(%a, %b)
};
%8 = %7(%6, %3);
%9 = fn (%a {virtual_device=meta[VirtualDevice][1]},
%b {virtual_device=meta[VirtualDevice][1]},
virtual_device=meta[VirtualDevice][1],
Primitive=1) {
add(%a, %b)
};
%10 = %9(%8, %6);
%11 = fn (%a,
virtual_device=meta[VirtualDevice][0],
Primitive=1) {
layout_transform(%a, src_layout="NCHW4c", dst_layout="NCHW")
};
%11(%10)
}
""",
"from_string",
None,
metatable,
)
GPU_DEVICE = tvm.device("cuda")
HOST_TARGET = tvm.target.Target("llvm")
GPU_TARGET = tvm.target.Target("cuda").with_host(HOST_TARGET)
GPU = tvm.target.VirtualDevice(GPU_DEVICE, GPU_TARGET)
CTXT = tvm.transform.PassContext(config={"relay.fallback_device_type": GPU.device_type_int})
config = tvm.target.make_compilation_config(CTXT, GPU_TARGET)
mod = relay.transform.InferType()(mod)
mod = relay.transform.PlanDevices(config)(mod)
func = mod["main"]
memory_plan = relay.backend._backend.GraphPlanMemory(func)
virtual_devices = {}
for k, v in memory_plan.expr_to_storage_info.items():
virtual_devices[v.storage_ids[0]] = v.virtual_devices[0].memory_scope
assert (
virtual_devices[0] == "global"
and virtual_devices[1] == "global"
and virtual_devices[2] == "global"
and virtual_devices[3] == "global"
and virtual_devices[4] == "global.texture"
and virtual_devices[5] == "global.texture"
and virtual_devices[6] == "global.texture"
)
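# Reshapes that only change the view should be compiled to __nop nodes and share storage with their inputs.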
def test_reshape_nop():
x = relay.var("x", shape=(10, 4))
xx = relay.abs(x)
y = relay.expand_dims(xx, axis=1)
t0 = relay.reshape(y, (1, 40))
t1 = relay.abs(y)
z0 = relay.reshape(t0, (2, 20))
z1 = relay.sqrt(t1)
z2 = relay.reshape(t1, (1, 40))
func = relay.Function([x], relay.Tuple([z0, z1, z2]))
x_data = np.random.rand(10, 4).astype("float32")
graph = relay.build(tvm.IRModule.from_expr(func), "llvm")
graph_json_str = graph.get_graph_json()
graph_json = json.loads(graph_json_str)
storage_ids = graph_json["attrs"]["storage_id"][1]
assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)
assert graph_json["nodes"][2]["attrs" |
]["func_name"] == "__nop"
assert graph_json["nodes"][5]["attrs"]["func_name"] == "__nop"
gmod = graph_executor.GraphModule(graph["default"](tvm.cpu(0)))
gmod.set_input(x=x_data)
gmod.run()
z0_np = x_data.reshape(2, 20)
z1_np = np.sqrt(
np.abs(
x_data.reshape(
10,
1,
4,
)
)
)
z2_np = np.abs(x_data).reshape(1, 40)
tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)
tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)
tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)
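# A GRU-like cell: one dense produces three gates that are split and combined with sigmoid/tanh/exp, then checked against a numpy reference.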
@tvm.testing.uses_gpu
def test_gru_like():
def unit(rnn_dim):
X = relay.var("X", shape=(1, rnn_dim))
W = relay.var("y", shape=(3 * rnn_dim, rnn_dim))
matmul = relay.nn.dense(X, W)
splitted = relay.split(matmul, indices_or_sections=3, axis=1)
out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
return relay.Function([X, W], out)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def unit_numpy(X, W):
prod = np.dot(X, W.transpose())
splits = np.split(prod, indices_or_sections=3, axis=1)
return sigmoid(splits[0]) + np.tanh(splits[1]) * np.exp(splits[2])
dtype = "float32"
rnn_dim = 1000
x = np.random.rand(1, rnn_dim).astype(dtype)
y = np.random.rand(3 * rnn_dim, rnn_dim).astype(dtype) * 0.01 - 0.005
out_shape = (1, rnn_dim)
z = unit(rnn_dim)
for target, dev in tvm.testing.enabled_targets():
with tvm.transform.PassContext(opt_level=2):
graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target)
m = graph_executor.create(graph, lib, dev)
m.set_input("X", tvm.nd.array(x.astype(dtype)))
m.set_input("y", tvm.nd.array(y.astype(dtype)))
m.set_input(**params)
m.run()
out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()
ref = unit_numpy(x, y)
tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
def test_compile_nested_tuples():
x = relay.var("x", shape=(10,))
x1 = x + relay.const(1.0)
x2 = x1 + relay.const(1.0)
x3 = x2 + relay.const(1.0)
x4 = x3 + relay.const(1.0)
out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])
func = relay.Function([x], out)
graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
x_data = np.random.uniform(size=(10,)).astype(np.float32)
mod.set_input(x=x_data)
mod.run()
assert mod.get_num_outputs() == 4
ref = x_data + 1
for i in range(mod.get_num_outputs()):
out = mod.get_output(i).numpy()
tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
ref = ref + 1
def test_compile_return_empty_tuple():
x = relay.var("x", shape=[16], dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.Tuple([])))
graph, lib, _ = relay.build(mod, "llvm")
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.run()
@tvm.testing.uses_gpu
def test_compile_fused_identity_cast():
x = relay.var("x", shape=[16], dtype="float32")
y = relay.cast(x, "float32")
func1 = relay.Function([x], y).with_attr("Primitive", 1)
x = relay.var("x", shape=[16], dtype="float32")
y = relay.add(x, relay.const(3.14, "float32"))
func2 = relay.Function([x], relay.Tuple([x, y])).with_attr("Primitive", 1)
x_global = relay.var("xx", shape=[16], dtype="float32")
tup = func2(x_global)
y_global = func1(relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1))
mod = tvm.IRModule.from_expr(relay.Function([x_global], y_global))
for target, device in tvm.testing.enabled_targets():
with tvm.transform.PassContext(opt_level=2):
graph, lib, _ = relay.build(mod, target=target)
executor = graph_executor.create(graph, lib, device=device)
executor.run()
def test_graph_executor_nested_tuples():
x, y, z, w = [relay.var(c, shape=(2, 3), dtype="float32") for c in "xyzw"]
out = relay.Tuple([x, relay.Tuple([y, relay.Tuple([z, w])])])
func = relay.Function([x, y, z, w], out)
f = relay.create_executor(
kind="graph", mod=tvm.IRModule.from_expr(func), device=tvm.cpu(0), target="llvm"
).evaluate()
data = [np.random.uniform(size=(2, 3)).astype("float32") for _ in "xyzw"]
out = f(*data)
assert len(out) == 2
tvm.testing.assert_allclose(out[0].numpy(), data[0])
assert len(out[1]) == 2
tvm.testing.assert_allclose(out[1][0].numpy(), data[1])
assert len(out[1][1]) == 2
tvm.testing.assert_allclose(out[1][1][0].numpy(), data[2])
tvm.testing.assert_allclose(out[1][1][1].numpy(), data[3])
def test_graph_executor_api():
dname_0, dname_1 = "data_0", "data_1"
data_0, data_1 = [relay.var(c, shape=(1, 1), dtype="float32") for c in [dname_0, dname_1]]
net = relay.add(data_0, data_1)
func = relay.Function((data_0, data_1), net)
lib = relay.build(tvm.IRModule.from_expr(func), "llvm")
mod = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
assert mod.get_input_index(dname_1) == 1
assert mod.get_input_index(dname_0) == 0
assert mod.get_input_index("Invalid") == -1
shape_dict, dtype_dict = mod.get_input_info()
assert isinstance(shape_dict, tvm.container.Map)
assert isinstance(dtype_dict, tvm.container.Map)
for data in [data_0, data_1]:
name = data.name_hint
ty = data.type_annotation
assert name in shape_dict
assert isinstance(shape_dict[name], tvm.runtime.container.ShapeTuple)
assert shape_dict[name] == tvm.runtime.container.ShapeTuple([i.value for i in ty.shape])
assert name in dtype_dict
assert isinstance(dtype_dict[name], tvm.runtime.container.String)
assert dtype_dict[name] == ty.dtype
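# benchmark() statistics are checked on a real run and again with time_evaluator patched to return the fixed latencies [1, 2, 2, 5].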
@tvm.testing.requires_llvm
def test_benchmark():
mod, params = mlp.get_workload(1)
lib = relay.build(mod, target="llvm", params=params)
exe = graph_executor.create(lib.get_graph_json(), lib.lib, tvm.cpu())
data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
result = exe.benchmark(tvm.cpu(), data=data, func_name="run", repeat=2, number=1)
assert result.mean == result.median
assert result.mean > 0
assert len(result.results) == 2
with patch.object(
tvm.runtime.module.Module,
"time_evaluator",
return_value=lambda: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]),
) as method:
result = exe.benchmark(tvm.cpu(), data=data, func_name="run", repeat=2, number=1)
assert result.mean == 2.5
assert result.median == 2.0
assert result.max == 5
assert result.min == 1
assert result.std == 1.5
@tvm.testing.parametrize_targets("cuda", "llvm")
def test_benchmark_end_to_end(dev, target):
mod, params = mlp.get_workload(1)
lib = relay.build(mod, target=target, params=params)
exe = graph_executor.create(lib.get_graph_json(), lib.lib, dev)
data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
result = exe.benchmark(dev, data=data, func_name="run", repeat=2, number=1, end_to_end=True)
assert result.mean > 0
assert len(result.results) == 2
@tvm.testing.requires_cuda
def test_benchmark_end_to_end_rpc():
server = rpc.Server("127.0.0.1")
remote = rpc.connect(server.host, server.port)
mod, params = mlp.get_workload(1)
lib = relay.build(mod, target="cuda", params=params)
temp = utils.tempdir()
path = temp.relpath("library.so")
lib.export_library(path)
remote.upload(path)
rlib = remote.load_module("library.so")
dev = remote.device("cuda")
exe = graph_executor.create(lib.get_graph_json(), rlib, dev)
data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev)
result = exe.benchmark(dev, data=data, func_name="run", repeat=2, number=1, end_to_end=True)
assert result.mean > 0
assert len(result.results) == 2
if __name__ == "__main__":
pytest.main([__file__])
import numpy as np
import pytest
import tvm
from tvm import testing
from tvm import nd
from tvm import relay
from tvm.runtime import container
from tvm.relay.backend.interpreter import RefValue, ConstructorValue
from tvm.relay.scope_builder import ScopeBuilder
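# Helper: evaluate an expression (or a module entry point) on the LLVM target and compare against the expected numpy result.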
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
for target in ["llvm"]:
dev = tvm.device(target, 0)
if not testing.device_enabled(target):
return
func = relay.create_executor(mod=mod, device=dev, target=target).evaluate(expr)
result = func if args is None else func(*args)
testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_tuple_value():
tv = container.tuple_object([relay.const(1), relay.const(2), relay.const(3)])
np.testing.assert_allclose(tv[0].data.numpy(), 1)
np.testing.assert_allclose(tv[1].data.numpy(), 2)
np.testing.assert_allclose(tv[2].data.numpy(), 3)
def test_tuple_getitem():
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
check_eval(func, [], 1)
def test_id():
x = relay.var("x", "float32")
ident = relay.Function([x], x)
one = np.array(1.0, "float32")
check_eval(ident, [one], one)
def test_add_const():
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
check_eval(func, [], 2)
def test_mul_param():
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
check_eval(func, [x_data, y_data], x_data * y_data)
def test_equal():
i = relay.var("i", shape=[], dtype="int32")
j = relay.var("i", shape=[], dtype="int32")
z = relay.equal(i, j)
func = relay.Function([i, j], z, ret_type=relay.TensorType([], "bool"))
i_data = relay.const(0, "int32")
j_data = relay.const(0, "int32")
check_eval(func, [i_data, j_data], True)
def test_subtract():
i = relay.var("i", shape=[], dtype="int32")
sub = relay.subtract(i, relay.const(1, dtype="int32"))
func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
i_data = np.array(1, dtype="int32")
check_eval(func, [i_data], 0)
def test_simple_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
mod[sum_up] = func
i_data = np.array(10, dtype="int32")
check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)
def test_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
accum = relay.var("accum", shape=[], dtype="int32")
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, "int32"))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
i_data = np.array(10, dtype="int32")
accum_data = np.array(0, dtype="int32")
check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)
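# Reference cells: create a ref holding 1, read it, overwrite it with 2, read it again, and add the two reads to get 3.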
def test_ref():
mod = tvm.IRModule()
three_with_ref = relay.GlobalVar("three_with_ref")
i = relay.Var("i")
iv = relay.Var("iv")
u = relay.Var("u")
uv = relay.Var("uv")
body = relay.add(iv, uv)
body = relay.Let(uv, relay.RefRead(i), body)
body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
body = relay.Let(iv, relay.RefRead(i), body)
body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
mod[three_with_ref] = relay.Function([], body)
check_eval(three_with_ref, [], 3, mod=mod)
def test_binds():
x = relay.var("x")
y = relay.add(x, x)
xx = np.ones((10, 20))
res = relay.create_executor().evaluate(y, binds={x: xx}).numpy()
testing.assert_allclose(xx + xx, res)
def test_kwargs_params():
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1, 10))
z = relay.var("z", shape=(1, 10))
f = relay.Function([x, y, z], x + y + z)
x_data = np.random.rand(1, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
z_data = np.random.rand(1, 10).astype("float32")
params = {"y": y_data, "z": z_data}
res = relay.create_executor().evaluate(f)(x_data, **params)
testing.assert_allclose(res.numpy(), x_data + y_data + z_data)
def test_function_taking_adt_ref_tuple():
mod = tvm.IRModule()
prelude = relay.prelude.Prelude(mod)
_, cons, nil = prelude.mod.get_type("List")
nil_value = ConstructorValue(nil.tag, [], nil)
cons_value = ConstructorValue(
cons.tag,
[nd.array(np.random.rand(1, 10).astype("float32")), nil_value],
cons,
)
ref_value = RefValue(nd.array(np.random.rand(1, 10).astype("float32")))
tuple_value = container.tuple_object(
[nd.array(np.random.rand(1, 10).astype("float32")) for _ in range(10)]
)
id_func = relay.create_executor(mod=mod).evaluate(prelude.id)
res_nil = id_func(nil_value)
assert res_nil.tag == nil_value.tag
assert len(res_nil.fields) == 0
res_cons = id_func(cons_value)
assert res_cons.tag == cons_value.tag
assert len(res_cons.fields) == len(cons_value.fields)
testing.assert_allclose(res_cons.fields[0].numpy(), cons_value.fields[0].numpy())
assert isinstance(res_cons.fields[1], ConstructorValue)
assert res_cons.fields[1].tag == nil.tag
assert len(res_cons.fields[1].fields) == 0
res_ref = id_func(ref_value)
testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy())
res_tuple = id_func(tuple_value)
for i in range(10):
testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy())
def test_tuple_passing():
x = relay.var(
"x",
type_annotation=relay.ty.TupleType(
[relay.ty.TensorType((), "int64"), relay.ty.TensorType((), "int64")]
),
)
fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
mod = tvm.IRModule({})
gv = relay.GlobalVar("main")
mod[gv] = fn
mod = relay.transform.InferType()(mod)
dev = tvm.cpu()
target = tvm.target.Target("llvm")
f = relay.create_executor(mod=mod, device=dev, target=target).evaluate(gv)
out = f((10, 8))
testing.assert_allclose(out.numpy(), np.array(10))
value_tuple = container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))])
out = f(value_tuple)
testing.assert_allclose(out.numpy(), np.array(11))
def test_dynamic():
n = 3
m = 2
x = relay.Var("x", relay.TensorType([relay.Any(), m], "float32"))
y = relay.Var("y", relay.TensorType([relay.Any(), m], "float32"))
xx = x - relay.expr.const(3.0)
yy = y * relay.expr.const(5.0)
z = relay.op.concatenate([xx, yy], axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.uniform(size=(n, m)).astype("float32")
y_np = np.random.uniform(size=(n, m)).astype("float32")
expected = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
check_eval(None, [x_np, y_np], expected, mod)
def test_ref_global_from_expr():
n = 3
x = relay.Var("x", relay.TensorType([n], "float32"))
y = relay.Var("y", relay.TensorType([n], "float32"))
mod = tvm.IRModule()
mod["add"] = relay.Function([x, y], relay.add(x, y))
x_np = np.random.uniform(size=(n,)).astype("float32")
y_np = np.random.uniform(size=(n,)).astype("float32")
expected = np.add(x_np, y_np)
expr = relay.Call(mod.get_global_var("add"), [relay.const(x_np), relay.const(y_np)])
check_eval(expr, None, expected, mod)
def test_keyword_args():
n = 3
x = relay.Var("x", relay.TensorType([n], "float32"))
y = relay.Var("y", relay.TensorType([n], "float3 |
2"))
z = relay.add(x, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.uniform(size=(n,)).astype("float32")
y_np = np.random.uniform(size=(n,)).astype("float32")
expected = np.add(x_np, y_np)
actual = relay.create_executor(mod=mod).evaluate()(y=y_np, x=x_np)
testing.assert_allclose(actual.numpy(), expected)
@pytest.mark.skip(reason="closures are currently not directly Python callable")
def test_functional_returns():
n = 3
x = relay.Var("x", relay.TensorType([n], "float32"))
f = relay.Function([x], x)
t = relay.Tuple([f, f])
c = np.random.rand(n).astype("float32")
result1, result2 = relay.create_executor().evaluate(t)
testing.assert_allclose(result1(c).numpy(), c)
testing.assert_allclose(result2(c).numpy(), c)
if __name__ == "__main__":
pytest.main([__file__])
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.target.target import Target
from tvm.relay.backend import Runtime, Executor, graph_executor_codegen
@pytest.mark.parametrize(
"test_target,unsupported_config",
[
["c", "-runtime=c"],
["c", "-system-lib=1"],
["c", "-executor=aot"],
["c", "-interface-api=c"],
["c", "-unpacked-api=1"],
["c", "-link-params=1"],
],
)
def test_deprecated_target_parameters(test_target, unsupported_config):
with pytest.raises(ValueError) as e_info:
Target(f"{test_target} {unsupported_config}")
assert f"Cannot recognize '{unsupported_config}" in str(e_info.execption)
def test_build_relay_graph_():
"""Test to build a simple relay graph by using APIs directly"""
def build_graph(mod, target):
target, target_host = tvm.target.Target.canon_target_and_host(target)
mod, _ = relay.optimize(mod, target)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
_, lowered_funcs, _ = grc.codegen(mod, mod["main"])
_ = relay.backend._backend.build(lowered_funcs, target)
def add(shape, dtype):
lhs = relay.var("A", shape=shape, dtype=dtype)
rhs = relay.var("B", shape=shape, dtype=dtype)
out = relay.add(lhs, rhs)
expr = relay.Function((lhs, rhs), out)
mod = tvm.IRModule.from_expr(expr)
return mod
build_graph(add((1, 8), "float32"), tvm.target.Target("llvm"))
if __name__ == "__main__":
tvm.testing.main()
import pytest
import tvm
from tvm import relay
def test_callgraph_construct():
mod = tvm.IRModule({})
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
mod["g1"] = relay.Function([x, y], x + y)
call_graph = relay.analysis.CallGraph(mod)
assert "g1" in str(call_graph)
assert tvm.ir.structural_equal(mod, call_graph.module)
def test_print_element():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
mod["g0"] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
mod["g1"] = relay.Function([x1, y1], x1 - y1)
call_graph = relay.analysis.CallGraph(mod)
assert "
assert "
def test_global_call_count():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
g0 = relay.GlobalVar("g0")
mod[g0] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
g1 = relay.GlobalVar("g1")
mod[g1] = relay.Function([x1, y1], g0(x1, y1))
call_graph = relay.analysis.CallGraph(mod)
p0 = relay.var("p0", shape=(2, 3))
p1 = relay.var("p1", shape=(2, 3))
func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
mod["main"] = func
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.global_call_count(g0) == 0
assert call_graph.global_call_count(g1) == 1
assert call_graph.global_call_count("main") == 2
def test_ref_count():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
g0 = relay.GlobalVar("g0")
mod[g0] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
g1 = relay.GlobalVar("g1")
mod[g1] = relay.Function([x1, y1], x1 - y1)
call_graph = relay.analysis.CallGraph(mod)
p0 = relay.var("p0", shape=(2, 3))
p1 = relay.var("p1", shape=(2, 3))
func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
mod["main"] = func
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.ref_count(g0) == 1
assert call_graph.ref_count(g1) == 1
assert call_graph.ref_count("main") == 0
def test_nested_ref():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
g0 = relay.GlobalVar("g0")
mod[g0] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
g1 = relay.GlobalVar("g1")
mod[g1] = relay.Function([x1, y1], g0(x1, y1))
call_graph = relay.analysis.CallGraph(mod)
p0 = relay.var("p0", shape=(2, 3))
p1 = relay.var("p1", shape=(2, 3))
func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
mod["main"] = func
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.ref_count(g0) == 2
assert call_graph.ref_count(g1) == 1
assert call_graph.ref_count("main") == 0
def test_recursive_func():
mod = tvm.IRModule({})
x = relay.var("x", shape=[], dtype="int32")
fn0 = relay.Function([x], x)
gx = relay.GlobalVar("gx")
mod[gx] = fn0
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
sb = relay.ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
global_call = gx(i)
rec_call = relay.Call(sum_up, [one_less]) + global_call
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
func = func.with_attr("Compiler", "a")
mod[sum_up] = func
iarg = relay.var("i", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg], sum_up(iarg))
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.is_recursive(sum_up)
assert call_graph.ref_count(sum_up) == 2
assert call_graph.ref_count(gx) == 1
assert call_graph.ref_count("main") |
== 0
if __name__ == "__main__":
pytest.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import synthetic
from tvm.relay import transform
def test_change_batch_synthetic():
net, params = synthetic.get_workload()
new_net = transform.ChangeBatch({net["main"].params[0]: 0}, batch_size=123)(net)
assert new_net["main"].checked_type.ret_type.shape[0] == 123
if __name__ == "__main__":
test_change_batch_synthetic()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import relay
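# Python comparison operators on relay expressions should produce the same IR as the explicit relay.less/greater/greater_equal/less_equal calls.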
a = relay.Var("a")
b = relay.expr.const(1.0, dtype="float32")
c = a < b
d = relay.less(a, b)
assert c.astext() == d.astext()
c = a > b
d = relay.greater(a, b)
assert c.astext() == d.astext()
c = a >= b
d = relay.greater_equal(a, b)
assert c.astext() == d.astext()
c = a <= b
d = relay.less_equal(a, b)
assert c.astext() == d.astext()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
from tvm.relay.frontend.common import infer_type
from tvm.relay import op as _op
def test_const_dtype():
strides = (1, 1)
np_array = np.array(strides).astype("int32")
strides = _op.const(np_array, dtype="int64")
# strides needs to be autoconverted to int64 on Windows
assert infer_type(strides).checked_type.dtype == np.dtype(np.int64)
a = tvm.nd.array(np.random.randint(0, high=255, size=(2, 3), dtype="uint8"))
a = _op.const(a, dtype="uint8")
aa = a.data.numpy()
assert aa.dtype == np.dtype(np.uint8)
b = _op.const(1, dtype="int8")
bb = b.data.numpy()
assert bb.dtype == np.dtype(np.int8)
kshape = (3, 10, 3, 3)
w = relay.const(np.zeros(kshape, dtype="float32"))
assert w.data.numpy().dtype == np.dtype(np.float32)
import numpy as np
import tvm
from tvm import te
from tvm import relay, runtime
from tvm.contrib.nvcc import have_fp16
import tvm.testing
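# Build a small dense + relu + add graph on LLVM, check the result against numpy, and verify that relay.build does not mutate the input module.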
def test_basic_build():
tgt = "llvm"
dev = tvm.cpu()
a = relay.var("a", dtype="float32", shape=(16, 8))
b = relay.var("b", dtype="float32", shape=(8, 8))
c = relay.var("c", dtype="float32", shape=(16, 8))
x = relay.nn.dense(a, b)
y = relay.nn.relu(x)
z = y + c
func = relay.Function([a, b, c], z)
A = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), device=dev)
B = tvm.nd.array(np.random.uniform(-1, 1, (8, 8)).astype("float32"), device=dev)
C = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), device=dev)
params = {"b": B, "c": C}
targets = {tvm.tir.IntImm("int32", dev.device_type): tgt}
mod = tvm.IRModule.from_expr(func)
func_in_mod = mod["main"]
assert mod["main"] == func_in_mod, "cannot compare function to itself"
lib = relay.build(mod, targets, "llvm", params=params)
assert mod["main"] == func_in_mod, "relay.build changed module in-place"
rt = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
rt.set_input("a", A)
rt.run()
out = rt.get_output(0)
np.testing.assert_allclose(
out.numpy(),
np.maximum(np.dot(A.numpy(), B.numpy().T), 0) + C.numpy(),
atol=1e-5,
rtol=1e-5,
)
@tvm.testing.requires_cuda
def test_fp16_build():
dtype = "float16"
dev = tvm.cuda(0)
if dtype == "float16" and not have_fp16(dev.compute_version):
print("skip because gpu does not support fp16")
return
x = relay.var("x", dtype=dtype, shape=(4, 4))
y = relay.var("y", dtype=dtype, shape=(4, 4))
z = x + y
func = relay.Function([x, y], z)
X = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)
Y = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)
params = {
"x": X,
"y": Y,
}
g_json, mmod, params = relay.build(func, "cuda", params=params)
rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)
rt.load_params(runtime.save_param_dict(params))
rt.run()
out = rt.get_output(0)
np.testing.assert_allclose(out.numpy(), X.numpy() + Y.numpy(), atol=1e-5, rtol=1e-5)
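# bfloat16 path: cast the inputs and parameters to bfloat16, run conv2d + batch_norm + relu + pooling + softmax, and make sure the whole graph builds on LLVM.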
@tvm.testing.requires_llvm
def test_bf16_build():
data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
weight = relay.var("weight", shape=(64, 3, 7, 7), dtype="float32")
bn_gamma = relay.var("gamma", shape=(64,), dtype="float32")
bn_beta = relay.var("beta", shape=(64,), dtype="float32")
bn_mean = relay.var("mean", shape=(64,), dtype="float32")
bn_var = relay.var("var", shape=(64,), dtype="float32")
params = {
"weight": np.random.uniform(-1, 1, size=(64, 3, 7, 7)).astype("float32"),
"gamma": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
"beta": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
"mean": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
"var": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
}
conv_bf16 = relay.nn.conv2d(
relay.cast(data, "bfloat16"),
relay.cast(weight, "bfloat16"),
strides=(2, 2),
padding=(3, 3, 3, 3),
channels=64,
kernel_size=(7, 7),
out_dtype="bfloat16",
)
bn_bf16 = relay.nn.batch_norm(
conv_bf16,
relay.cast(bn_gamma, "bfloat16"),
relay.cast(bn_beta, "bfloat16"),
relay.cast(bn_mean, "bfloat16"),
relay.cast(bn_var, "bfloat16"),
)
relu_bf16 = relay.nn.relu(bn_bf16[0])
maxpool_bf16 = relay.nn.max_pool2d(relu_bf16, pool_size=(2, 2), strides=(2, 2))
avgpool_bf16 = relay.nn.avg_pool2d(maxpool_bf16, pool_size=(2, 2), strides=(2, 2))
flattened_bf16 = relay.nn.batch_flatten(avgpool_bf16)
softmax_bf16 = relay.nn.softmax(flattened_bf16)
mod_bf16 = tvm.IRModule.from_expr(softmax_bf16)
with tvm.transform.PassContext(opt_level=3):
relay.build(mod_bf16, target="llvm", params=params)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_fp16_conversion(target, dev):
if target == "cuda" and not have_fp16(dev.compute_version):
print("skip because gpu does not support fp16")
return
n = 10
for (src, dst) in [("float32", "float16"), ("float16", "float32")]:
x = relay.var("x", relay.TensorType((n,), src))
y = x.astype(dst)
func = relay.Function([x], y)
X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2)
with tvm.transform.PassContext(opt_level=1):
g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), target)
rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)
rt.set_input("x", X)
rt.run()
out = rt.get_output(0)
np.testing.assert_allclose(out.numpy(), X.numpy().astype(dst), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_basic_build()
test_fp16_build()
test_fp16_conversion()
test_bf16_build()
import numpy as np
import tvm
from tvm import relay
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.dataflow_pattern import *
from tvm.relay.testing import run_opt_pass
K_ELEMWISE = 0
K_BROADCAST = 1
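# The tests below check that each pattern constructor (is_expr, is_var, is_op, is_tuple, ...) builds the expected *Pattern node with the expected fields.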
def test_expr_pattern():
ep = is_expr(relay.var("x", shape=(4, 1)))
assert isinstance(ep, ExprPattern)
assert isinstance(ep.expr, relay.Var)
def test_var_pattern():
v = is_var("x")
assert isinstance(v, VarPattern)
assert v.name == "x"
def test_constant_pattern():
c = is_constant()
assert isinstance(c, ConstantPattern)
def test_wildcard_pattern():
wc = wildcard()
assert isinstance(wc, WildcardPattern)
def test_CallPattern():
wc1 = wildcard()
wc2 = wildcard()
c = is_op("add")(wc1, wc2)
assert isinstance(c, CallPattern)
assert isinstance(c.args[0], WildcardPattern)
assert isinstance(c.args[1], WildcardPattern)
def test_FunctionPattern():
wc1 = wildcard()
wc2 = wildcard()
c = is_op("add")(wc1, wc2)
f = FunctionPattern([wc1, wc2], c)
assert isinstance(f, FunctionPattern)
assert isinstance(f.params[0], WildcardPattern)
assert isinstance(f.params[1], WildcardPattern)
assert isinstance(f.body, CallPattern)
assert isinstance(f.body.args[0], WildcardPattern)
assert isinstance(f.body.args[1], WildcardPattern)
def test_TuplePattern():
wc1 = wildcard()
wc2 = wildcard()
t = is_tuple([wc1, wc2])
assert isinstance(t, TuplePattern)
assert isinstance(t.fields[0], WildcardPattern)
assert isinstance(t.fields[1], WildcardPattern)
def test_TupleGetItemPattern():
wc1 = wildcard()
wc2 = wildcard()
t = is_tuple([wc1, wc2])
tgi = is_tuple_get_item(t, 1)
assert isinstance(tgi, TupleGetItemPattern)
assert isinstance(tgi.tuple, TuplePattern)
assert isinstance(tgi.tuple.fields[0], WildcardPattern)
assert isinstance(tgi.tuple.fields[1], WildcardPattern)
def test_AltPattern():
is_add_or_sub = is_op("add") | is_op("subtract")
assert isinstance(is_add_or_sub, AltPattern)
def test_TypePattern():
ttype = relay.TensorType((10, 10), "float32")
ty_pat = has_type(ttype)
assert isinstance(ty_pat, TypePattern)