/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::process::Command;
macro_rules! mf_dir {
($p:literal) => {
concat!(env!("CARGO_MANIFEST_DIR"), $p)
};
}
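// For example, mf_dir!("/src/build_model.py") expands at compile time to
// "<CARGO_MANIFEST_DIR>/src/build_model.py".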
fn main() {
let out_dir = std::env::var("OUT_DIR").unwrap();
let build_output = Command::new(mf_dir!("/src/build_model.py"))
.arg(&out_dir)
.env(
"PYTHONPATH",
concat!(
mf_dir!("/../../python"),
":",
mf_dir!("/../../nnvm/python")
),
)
.output()
.expect("Failed to build model");
assert!(
["model.o", "graph.json", "params.bin"]
.iter()
.all(|f| { std::path::Path::new(&format!("{}/{}", out_dir, f)).exists() }),
"Could not build tvm lib: STDOUT:\n\n{}\n\nSTDERR\n\n{}",
String::from_utf8(build_output.stdout).unwrap().trim(),
String::from_utf8(build_output.stderr).unwrap().trim()
);
let sysroot_output = Command::new("rustc")
.args(&["--print", "sysroot"])
.output()
.expect("Failed to get sysroot");
let sysroot = String::from_utf8(sysroot_output.stdout).unwrap();
let sysroot = sysroot.trim();
let mut llvm_tools_path = std::path::PathBuf::from(&sysroot);
llvm_tools_path.push("lib/rustlib/x86_64-unknown-linux-gnu/bin");
Command::new("rustup")
.args(&["component", "add", "llvm-tools-preview"])
.output()
.expect("failed to install llvm tools");
std::process::Command::new(llvm_tools_path.join("llvm-objcopy"))
.arg("--globalize-symbol=__tvm_module_startup")
.arg("--remove-section=.ctors")
.arg(&format!("{}/model.o", out_dir))
.output()
.expect("gould not gloablize startup function");
std::process::Command::new(llvm_tools_path.join("llvm-ar"))
.arg("rcs")
.arg(&format!("{}/libmodel.a", out_dir))
.arg(&format!("{}/model.o", out_dir))
.output()
.expect("failed to package model archive");
println!("cargo:rustc-link-lib=static=model");
println!("cargo:rustc-link-search=native={}", out_dir);
}
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import struct
import sys
import numpy as np
def float_bytes(l):
for i in range(0, len(l), 4):
yield l[i : i + 4]
floats = [struct.unpack("f", f)[0] for f in float_bytes(sys.stdin.buffer.read())]
print(np.array(floats))
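# Hypothetical usage (not part of the original script; the file name is
# illustrative): pipe raw float32 bytes into it, e.g. a response saved from
# the model server:
#   python3 print_floats.py < response.bin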
#!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Creates a simple TVM modules."""
import os
from os import path as osp
import sys
from tvm import relay, runtime
from tvm.relay import testing
import tvm
from tvm import te
def main():
    # relay.testing also provides a smaller MLP workload; this example builds
    # ResNet-18 on a (1, 3, 224, 224) input.
    dshape = (1, 3, 224, 224)
net, params = relay.testing.resnet.get_workload(
layers=18, batch_size=dshape[0], image_shape=dshape[1:]
)
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(
net,
"llvm",
params=params,
runtime=tvm.relay.backend.Runtime("cpp", {"system-lib": True}),
)
build_dir = osp.abspath(sys.argv[1])
if not osp.isdir(build_dir):
os.makedirs(build_dir, exist_ok=True)
lib.save(osp.join(build_dir, "model.o"))
with open(osp.join(build_dir, "graph.json"), "w") as f_graph_json:
f_graph_json.write(graph)
with open(osp.join(build_dir, "params.bin"), "wb") as f_params:
f_params.write(runtime.save_param_dict(params))
if __name__ == "__main__":
main()
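# The three artifacts written here (model.o, graph.json, params.bin) are
# exactly the files the Rust build script above asserts exist before it
# archives model.o into libmodel.a.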
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern crate tvm_runtime;
use std::{
convert::TryFrom as _,
io::{Read as _, Write as _},
};
fn main() {
let syslib = tvm_runtime::SystemLibModule::default();
let graph_json = include_str!(concat!(env!("OUT_DIR"), "/graph.json"));
let params_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/params.bin"));
let params = tvm_runtime::load_param_dict(params_bytes).unwrap();
let graph = tvm_runtime::Graph::try_from(graph_json).unwrap();
let mut exec = tvm_runtime::GraphExecutor::new(graph, &syslib).unwrap();
exec.load_params(params);
let listener = std::net::TcpListener::bind("127.0.0.1:4242").unwrap();
for stream in listener.incoming() {
let mut stream = stream.unwrap();
if let Err(_) =
stream.read_exact(exec.get_input("data").unwrap().data().view().as_mut_slice())
{
continue;
}
exec.run();
if let Err(_) = stream.write_all(exec.get_output(0).unwrap().data().as_slice()) {
continue;
}
}
}
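// A minimal client sketch (an assumption, not part of this example): send the
// input tensor's raw f32 bytes, then read the output back. In Python, for the
// ResNet-18 input shape built by build_model.py:
//
//   import numpy as np, socket
//   x = np.random.rand(1, 3, 224, 224).astype("float32")
//   with socket.create_connection(("127.0.0.1", 4242)) as s:
//       s.sendall(x.tobytes())
//       out = np.frombuffer(s.recv(1 << 20), dtype="float32")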
"""Test script for tf op module""" |
import tempfile |
import os |
import logging |
import tensorflow as tf |
import numpy as np |
import tvm
from tvm |
import te
from tvm.contrib |
import tf_op
def test_use_tvmdso_op():
"""main test function"""
def export_cpu_add_lib():
"""create cpu add op lib"""
n = te.var("n")
ph_a = te.placeholder((n,), name="ph_a")
ph_b = te.placeholder((n,), name="ph_b")
ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name="ph_c")
sched = te.create_schedule(ph_c.op)
fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "c", name="vector_add")
lib_path = tempfile.mktemp("tvm_add_dll.so")
fadd_dylib.export_library(lib_path)
return lib_path
def export_gpu_add_lib():
"""create gpu add op lib"""
n = te.var("n")
ph_a = te.placeholder((n,), name="ph_a")
ph_b = te.placeholder((n,), name="ph_b")
ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name="ph_c")
sched = te.create_schedule(ph_c.op)
b_axis, t_axis = sched[ph_c].split(ph_c.op.axis[0], factor=64)
sched[ph_c].bind(b_axis, te.thread_axis("blockIdx.x"))
sched[ph_c].bind(t_axis, te.thread_axis("threadIdx.x"))
fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "cuda", name="vector_add")
lib_path = tempfile.mktemp("tvm_add_cuda_dll.so")
fadd_dylib.export_library(lib_path)
return lib_path
def test_add(session, lib_path, tf_device):
"""test add lib with TensorFlow wrapper"""
module = tf_op.OpModule(lib_path)
left = tf.placeholder("float32", shape=[4])
right = tf.placeholder("float32", shape=[4])
feed_dict = {left: [1.0, 2.0, 3.0, 4.0], right: [5.0, 6.0, 7.0, 8.0]}
expect = np.asarray([6.0, 8.0, 10.0, 12.0])
add1 = module.func("vector_add", output_shape=[4], output_dtype="float")
add2 = module.func("vector_add", output_shape=tf.shape(left), output_dtype="float")
add3 = module.func("vector_add", output_shape=[tf.shape(left)[0]], output_dtype="float")
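        # The same TVM function is wrapped three ways: a static output shape,
        # a fully dynamic tf.shape tensor, and a list mixing dynamic dimensions.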
with tf.device(tf_device):
            output1 = session.run(add1(left, right), feed_dict)
np.testing.assert_equal(output1, expect)
output2 = session.run(add2(left, right), feed_dict)
np.testing.assert_equal(output2, expect)
output3 = session.run(add3(left, right), feed_dict)
np.testing.assert_equal(output3, expect)
def cpu_test(session):
"""test function for cpu"""
cpu_lib = None
try:
cpu_lib = export_cpu_add_lib()
test_add(session, cpu_lib, "/cpu:0")
finally:
if cpu_lib is not None:
os.remove(cpu_lib)
def gpu_test(session):
"""test function for gpu"""
gpu_lib = None
try:
gpu_lib = export_gpu_add_lib()
test_add(session, gpu_lib, "/gpu:0")
finally:
if gpu_lib is not None:
os.remove(gpu_lib)
with tf.Session() as session:
if tvm.runtime.enabled("cpu"):
logging.info("Test TensorFlow op on cpu kernel")
cpu_test(session)
if tvm.runtime.enabled("gpu"):
logging.info("Test TensorFlow op on gpu kernel")
gpu_test(session)
if __name__ == "__main__":
    test_use_tvmdso_op()
import os
import tvm
from tvm import te
from tvm.contrib import nvcc
import numpy as np
from tvm import topi
TASK = "reduce_map"
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
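# Each generated kernel is dumped to perf/<TASK>_generated.cu; setting
# USE_MANUAL_CODE = True swaps in a hand-edited perf/<TASK>_manual.cu instead.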
def test_broadcast_to(in_shape, out_shape):
global TASK
TASK = (
"bcast_to_i"
+ "_".join([str(ele) for ele in in_shape])
+ "o"
+ "_".join([str(ele) for ele in out_shape])
)
A = te.placeholder(shape=in_shape, name="A")
B = topi.broadcast_to(A, out_shape)
s = topi.cuda.schedule_broadcast(B)
fcuda = tvm.build(s, [A, B], "cuda", name="broadcast_to")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.broadcast_to(data_npy, out_shape)
data_nd = tvm.nd.array(data_npy, tvm.cuda())
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), tvm.cuda())
for _ in range(2):
fcuda(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"):
global TASK
TASK = (
"bcast_binary_"
+ typ
+ "_lhs"
+ "_".join([str(ele) for ele in lhs_shape])
+ "rhs"
+ "_".join([str(ele) for ele in rhs_shape])
)
A = te.placeholder(shape=lhs_shape, name="A")
B = te.placeholder(shape=rhs_shape, name="B")
if typ == "add":
C = topi.broadcast_add(A, B)
elif typ == "sub":
C = topi.broadcast_sub(A, B)
elif typ == "div":
C = topi.broadcast_div(A, B)
elif typ == "mul":
C = topi.broadcast_mul(A, B)
elif typ == "maximum":
C = topi.broadcast_maximum(A, B)
elif typ == "mi |
nimum":
C = topi.broadcast_minimum(A, B)
else:
raise NotImplementedError
s = topi.cuda.schedule_broadcast(C)
fcuda = tvm.build(s, [A, B, C], "cuda", name="broadcast_binary" + "_" + typ)
lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype)
if typ == "add":
out_npy = lhs_npy + rhs_npy
elif typ == "sub":
out_npy = lhs_npy - rhs_npy
elif typ == "div":
rhs_npy = np.abs(rhs_npy) + 0.001
out_npy = lhs_npy / rhs_npy
elif typ == "mul":
out_npy = lhs_npy * rhs_npy
elif typ == "maximum":
out_npy = np.maximum(lhs_npy, rhs_npy)
elif typ == "minimum":
out_npy = np.minimum(lhs_npy, rhs_npy)
lhs_nd = tvm.nd.array(lhs_npy, tvm.cuda())
rhs_nd = tvm.nd.array(rhs_npy, tvm.cuda())
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), tvm.cuda())
for _ in range(2):
fcuda(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
if __name__ == "__main__":
test_broadcast_to((1,), (10,))
test_broadcast_to((1, 1, 5, 4), (3, 4, 4, 4, 5, 4))
test_broadcast_to((1, 128, 1, 32), (64, 128, 64, 32))
test_broadcast_binary_op((5, 2, 3), (2, 1), typ="add")
test_broadcast_binary_op((5, 64, 128), (2, 5, 64, 1), typ="mul")
test_broadcast_binary_op((2, 3, 1, 32), (64, 32), typ="div")
test_broadcast_binary_op((1, 32), (64, 32), typ="sub")
test_broadcast_binary_op((32,), (64, 32), typ="maximum")
    test_broadcast_binary_op((1, 2, 2, 1, 32), (64, 32), typ="minimum")
import os
import tvm
from tvm import te
import numpy as np
from scipy import signal
from tvm.contrib import nvcc
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.cuda.depthwise_conv2d import (
    schedule_depthwise_conv2d_nchw,
    schedule_depthwise_conv2d_nhwc,
)
TASK = "depthwise_conv2d"
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_depthwise_conv2d_nchw():
"""You may test different settings."""
batch = 1
in_channel = 256
in_height = 96
in_width = 96
filter_channel = in_channel
channel_multiplier = 1
filter_height = 3
filter_width = 3
stride_h = 1
stride_w = 1
padding = "SAME"
Input = te.placeholder((batch, in_channel, in_height, in_width), name="Input")
Filter = te.placeholder(
(filter_channel, channel_multiplier, filter_height, filter_width), name="Filter"
)
Stride = [stride_h, stride_w]
Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
DepthwiseConv2d = topi.nn.depthwise_conv2d_nchw(Input, Filter, Stride, padding)
ScaleShift = topi.nn.scale_shift_nchw(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
s1 = schedule_depthwise_conv2d_nchw(DepthwiseConv2d)
s2 = schedule_depthwise_conv2d_nchw(ScaleShift)
s3 = schedule_depthwise_conv2d_nchw(Relu)
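    # Three separate schedules so that conv alone, conv + scale_shift, and
    # conv + scale_shift + relu can each be built and timed below.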
input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
    shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
depthwise_conv2d_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), dev
)
scale_shift_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), dev
)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), dev)
timer_1 = f1.time_evaluator(f1.entry_name, dev, number=1000)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
timer_2 = f2.time_evaluator(f2.entry_name, dev, number=1000)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
print("Input shape = " + str(get_const_tuple(Input.shape)))
print("Filter shape = " + str(get_const_tuple(Filter.shape)))
print("Stride = (%d, %d)" % (stride_h, stride_w))
print("padding = %s\n" % padding)
print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6))
print(
"average time cost of 1000 runs (depthwise_conv2d + scal |
e_shift) = %g us"
% (tcost_2 * 1e6)
)
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us"
% (tcost_3 * 1e6)
)
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
input_np, filter_np, stride=[stride_h, stride_w], padding=padding
)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:, c, :, :] = (
depthwise_conv2d_scipy[:, c, :, :] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
print("success")
for device in ["cuda", "opencl", "rocm"]:
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "rocm"}}
):
check_device(device)
def test_depthwise_conv2d_nhwc():
"""You may test different settings."""
batch = 1
in_channel = 256
in_height = 96
in_width = 96
filter_channel = in_channel
channel_multiplier = 1
filter_height = 3
filter_width = 3
stride_h = 1
stride_w = 1
padding = "SAME"
Input = te.placeholder((batch, in_height, in_width, in_channel), name="Input")
Filter = te.placeholder(
(filter_height, filter_width, filter_channel, channel_multiplier), name="Filter"
)
Stride = [stride_h, stride_w]
Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
DepthwiseConv2d = topi.nn.depthwise_conv2d_nhwc(Input, Filter, Stride, padding)
    ScaleShift = topi.nn.scale_shift_nhwc(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
s1 = schedule_depthwise_conv2d_nhwc(DepthwiseConv2d)
s2 = schedule_depthwise_conv2d_nhwc(ScaleShift)
s3 = schedule_depthwise_conv2d_nhwc(Relu)
input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
depthwise_conv2d_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), dev
)
scale_shift_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), dev
)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), dev)
timer_1 = f1.time_evaluator(f1.entry_name, dev, number=1000)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
timer_2 = f2.time_evaluator(f2.entry_name, dev, number=1000)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
        timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
print("Input shape = " + str(get_const_tuple(Input.shape)))
print("Filter shape = " + str(get_const_tuple(Filter.shape)))
print("Stride = (%d, %d)" % (stride_h, stride_w))
print("padding = %s\n" % padding)
print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6))
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us"
% (tcost_2 * 1e6)
)
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us"
% (tcost_3 * 1e6)
)
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(
input_np, filter_np, stride=[stride_h, stride_w], padding=padding
)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:, :, :, c] = (
depthwise_conv2d_scipy[:, :, :, c] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
print("success")
for device in ["cuda", "opencl", "rocm"]:
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "cuda"}}
):
check_device(device)
if __name__ == "__main__":
test_depthwise_conv2d_nchw()
    test_depthwise_conv2d_nhwc()
"""Example code to do convolution."""
import os
import numpy as np
import scipy.signal
import tvm
from tvm import te
from tvm.contrib import nvcc
from tvm import topi
from tvm.topi.utils import get_const_tuple
TASK = "conv2d_hwcn_map"
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_conv2d_hwcn_map():
batch = 64
in_channel = 128
in_height = 16
in_width = 16
num_filter = 128
kernel = 3
stride = 2
padding = "SAME"
A = te.placeholder((in_height, in_width, in_channel, batch), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
B = topi.nn.conv2d_hwcn(A, W, stride, padding)
C = topi.nn.relu(B)
s1 = topi.cuda.schedule_conv2d_hwcn([B])
s2 = topi.cuda.schedule_conv2d_hwcn([C])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
b_np = tvm.topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
c_np = np.maximum(b_np, 0)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
with tvm.transform.PassContext(
config={
"tir.UrollLoop": {"auto_unroll_max_step": 128, "explicit_unroll": device == "rocm"}
}
):
func1 = tvm.build(s1, [A, W, B], device)
            func1(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
func2 = tvm.build(s2, [A, W, C], device)
func2(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["cuda", "opencl", "rocm"]:
check_device(device)
if __name__ == "__main__":
    test_conv2d_hwcn_map()
"""Conv Int8 functional and performance testing"""
import sys
import logging
import numpy as np
import tvm
from tvm import te
from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_arm")
LOGGER.disabled = False
WORKLOADS = [
(56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
(56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
(28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
(28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
(14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
(14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
(28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
(14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
(7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]
TARGET_NAME = "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
NUM_VEC_LANES = 16
DEV = tvm.device(TARGET_NAME, 0)
def get_shape(
im_height, im_width, in_filter, out_filter, k_h, k_w, hpad, wpad, hstride, wstride, out_dtype
):
"""
Finds out the shape of all data structures
"""
    data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES)
    if out_dtype == "int32" or out_dtype == "uint32":
        kernel_shape = (
            out_filter // NUM_VEC_LANES,
            in_filter // NUM_VEC_LANES,
            k_h,
            k_w,
            NUM_VEC_LANES // 4,
            NUM_VEC_LANES,
            4,
        )
    elif out_dtype == "float32":
        kernel_shape = (
            out_filter // NUM_VEC_LANES,
            in_filter // NUM_VEC_LANES,
            k_h,
            k_w,
            NUM_VEC_LANES,
            NUM_VEC_LANES,
        )
    out_height = (im_height + 2 * hpad - k_h) // hstride + 1
    out_width = (im_width + 2 * wpad - k_w) // wstride + 1
    o_shape = (1, out_filter // NUM_VEC_LANES, out_height, out_width, NUM_VEC_LANES)
    return (data_shape, kernel_shape, o_shape)
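# For illustration (an added example, assuming the shapes reconstructed above):
# with NUM_VEC_LANES = 16, the workload (56, 56, 64, 64, 3, 3, 1, 1, 1, 1) and
# out_dtype="int32" gives data_shape = (1, 4, 56, 56, 16),
# kernel_shape = (4, 4, 3, 3, 4, 16, 4),
# out_height = out_width = (56 + 2 - 3) // 1 + 1 = 56,
# and o_shape = (1, 4, 56, 56, 16).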
def run_inference(
data_dtype,
kernel_dtype,
out_dtype,
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
):
"""
Runs the inference and checks the functional correctness between
compute and schedule outputs
"""
(data_shape, kernel_shape, o_shape) = get_shape(
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
out_dtype,
)
data = te.placeholder(data_shape, name="data", dtype=data_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
if data_dtype == "float32":
data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
else:
data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
with tvm.target.Target(TARGET_NAME):
if out_dtype == "float32":
conv = topi.nn.conv2d_NCHWc(
data,
kernel,
stride=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
out_layout="NCHWc",
out_dtype=out_dtype,
)
else:
conv = topi.nn.conv2d_NCHWc_int8(
data,
kernel,
strides=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
                out_layout="NCHWc",
out_dtype=out_dtype,
)
out = topi.nn.relu(conv)
sch = te.create_schedule(out.op)
func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
func(data_array, kernel_array, c_orig)
LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
if out_dtype == "float32":
sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
else:
sconv = topi.generic.nn.schedule_conv2d_NCHWc_int8(outs=[out])
func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
func(data_array, kernel_array, c_sch)
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
else:
assert np.allclose(c_orig.numpy(), c_sch.numpy())
evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
return evaluator(data_array, kernel_array, c_sch).mean
if __name__ == "__main__":
LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
SPEEDUP_ARRAY = []
for i, wkl in enumerate(WORKLOADS):
for dtype in ["uint", "int"]:
fp32_time = run_inference("float32", "float32", "float32", *wkl)
int8_time = run_inference("%s8" % dtype, "%s8" % dtype, "%s32" % dtype, *wkl)
kernel_h = wkl[4]
kernel_w = wkl[5]
LOGGER.info(
"[%s] Workload
+ str(i)
+ ", "
+ str(kernel_h)
+ "x"
+ str(kernel_w)
+ ", "
+ str(fp32_time)
+ ", "
+ str(int8_time)
+ ", "
+ str(fp32_time / int8_time)
)
SPEEDUP_ARRAY.append(fp32_time / int8_time)
LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))) |
""" Conv Int8 functional and performance testing""" |
import sys |
import logging |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_intel")
LOGGER.disabled = False
WORKLOADS = [
(56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
(56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
(28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
(28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
(14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
(14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
(28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
(14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
(7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]
TARGET_NAME = "llvm -mcpu=skylake-avx512"
NUM_VEC_LANES = 16
DEV = tvm.device(TARGET_NAME, 0)
def get_shape(
im_height, im_width, in_filter, out_filter, k_h, k_w, hpad, wpad, hstride, wstride, out_dtype
):
"""
Finds out the shape of all data structures
"""
    data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES)
    if out_dtype == "int32":
        kernel_shape = (
            out_filter // NUM_VEC_LANES,
            in_filter // NUM_VEC_LANES,
            k_h,
            k_w,
            NUM_VEC_LANES // 4,
            NUM_VEC_LANES,
            4,
        )
    elif out_dtype == "float32":
        kernel_shape = (
            out_filter // NUM_VEC_LANES,
            in_filter // NUM_VEC_LANES,
            k_h,
            k_w,
            NUM_VEC_LANES,
            NUM_VEC_LANES,
        )
    out_height = (im_height + 2 * hpad - k_h) // hstride + 1
    out_width = (im_width + 2 * wpad - k_w) // wstride + 1
    o_shape = (1, out_filter // NUM_VEC_LANES, out_height, out_width, NUM_VEC_LANES)
    return (data_shape, kernel_shape, o_shape)
def run_inference(
data_dtype,
kernel_dtype,
out_dtype,
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
):
"""
Runs the inference and checks the functional correctness between
compute and schedule outputs
"""
(data_shape, kernel_shape, o_shape) = get_shape(
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
out_dtype,
)
data = te.placeholder(data_shape, name="data", dtype=data_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
if data_dtype == "float32":
data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
else:
data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
with tvm.target.Target(TARGET_NAME):
conv = topi.nn.conv2d_NCHWc(
data,
kernel,
stride=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
out_layout="NCHWc",
out_dtype=out_dtype,
)
out = topi.nn.relu(conv)
sch = te.create_schedule(out.op)
func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
func(data_array, kernel_array, c_orig)
LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
    func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
func(data_array, kernel_array, c_sch)
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
else:
assert np.allclose(c_orig.numpy(), c_sch.numpy())
evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
return evaluator(data_array, kernel_array, c_sch).mean
if __name__ == "__main__":
LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
SPEEDUP_ARRAY = []
for i, wkl in enumerate(WORKLOADS):
fp32_time = run_inference("float32", "float32", "float32", *wkl)
int8_time = run_inference("uint8", "int8", "int32", *wkl)
kernel_h = wkl[4]
kernel_w = wkl[5]
LOGGER.info(
"Workload
+ str(i)
+ ", "
+ str(kernel_h)
+ "x"
+ str(kernel_w)
+ ", "
+ str(fp32_time)
+ ", "
+ str(int8_time)
+ ", "
+ str(fp32_time / int8_time)
)
SPEEDUP_ARRAY.append(fp32_time / int8_time)
LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))) |
"""Example code to do square matrix multiplication on Android Phone.""" |
import tvm
from tvm |
import te |
import os
from tvm |
import rpc
from tvm.contrib |
import utils, ndk |
import numpy as np
proxy_host = os.environ["TVM_ANDROID_RPC_PROXY_HOST"]
proxy_port = 9090
key = "android"
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch
def ngflops(N):
return 2.0 * float(N * N * N) / (10**9)
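# An N x N matmul performs N**3 multiply-accumulates, i.e. 2 * N**3 floating
# point operations.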
dtype = "float32"
def evaluate(func, dev, N, times):
a_np = np.random.uniform(size=(N, N)).astype(dtype)
b_np = np.random.uniform(size=(N, N)).astype(dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((N, N), dtype=dtype), dev)
time_f = func.time_evaluator(func.entry_name, dev, number=times)
cost = time_f(a, b, c).mean
gf = ngflops(N) / cost
print("%g secs/op, %g GFLOPS" % (cost, gf))
np.testing.assert_almost_equal(c.numpy(), a_np.dot(b_np), decimal=2)
def test_gemm_gpu(N, times, bn, num_block, num_thread):
assert bn <= N
assert num_thread * num_thread * 16 <= N
assert num_block * num_block * 2 <= N
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="Btmp")
k = te.reduce_axis((0, N), name="k")
    packedB = te.compute((N, N // bn, bn), lambda x, y, z: B[x, y * bn + z], name="B")
    C = te.compute(
        (N, N), lambda ii, jj: te.sum(A[ii, k] * packedB[k, jj // bn, jj % bn], axis=k), name="C"
    )
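    # Repacking B as (N, N // bn, bn) makes the innermost bn elements
    # contiguous so loads of packedB can be vectorized below.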
s = te.create_schedule(C.op)
CC = s.cache_write(C, "local")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
pby, pbi = s[packedB].split(packedB.op.axis[0], nparts=num_thread)
pbx, pbj = s[packedB].split(packedB.op.axis[1], nparts=num_thread)
s[packedB].bind(pby, thread_y)
s[packedB].bind(pbx, thread_x)
pbz, pbk = s[packedB].split(packedB.op.axis[2], factor=8)
s[packedB].vectorize(pbk)
by, yi = s[C].split(C.op.axis[0], nparts=num_block)
    bx, xi = s[C].split(C.op.axis[1], nparts=num_thread)
s[C].bind(by, block_y)
s[C].bind(bx, thread_y)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_block)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, block_x)
s[C].bind(tx, thread_x)
xyi, xxi = s[C].split(xi, factor=8)
s[C].reorder(tyz, txz, ty, tx, yi, xyi, xxi)
s[C].vectorize(xxi)
s[CC].compute_at(s[C], yi)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
xo, xi = s[CC].split(xo, factor=8)
s[CC].vectorize(xi)
ko, ki = s[CC].split(k, factor=2)
s[CC].unroll(ki)
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], tvm.target.Target("opencl", host=target), name="gemm_gpu")
temp = utils.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
f.export_library(path_dso, ndk.create_shared)
remote = rpc.connect(proxy_host, proxy_port, key=key)
dev = remote.cl(0)
remote.upload(path_dso)
f = remote.load_module("gemm_gpu.so")
evaluate(f, dev, N, times)
if __name__ == "__main__":
    test_gemm_gpu(1024, times=5, bn=8, num_block=2, num_thread=8)
"""Example code to do square matrix multiplication."""
import tvm
from tvm import te
import os
from tvm.contrib import nvcc
from tvm.contrib import spirv
import numpy as np
import tvm.testing
TASK = "gemm"
USE_MANUAL_CODE = False
def test_gemm():
nn = 2048
n = te.var("n")
n = tvm.runtime.convert(nn)
m, l = n, n
A = te.placeholder((l, n), name="A")
B = te.placeholder((l, m), name="B")
k = te.reduce_axis((0, l), name="k")
C = te.compute((m, n), lambda ii, jj: te.sum(A[k, jj] * B[k, ii], axis=k), name="C")
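    # Note the transposed access: C[ii, jj] = sum_k A[k, jj] * B[k, ii],
    # i.e. C = B^T . A, matching the np.dot(b_np.T, a_np) check below.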
s = te.create_schedule(C.op)
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
scale = 8
num_thread = 8
block_factor = scale * num_thread
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
by, yi = s[C].split(C.op.axis[0], factor=block_factor)
bx, xi = s[C].split(C.op.axis[1], factor=block_factor)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_thread)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
ko, ki = s[CC].split(k, factor=8)
kt, ki = s[CC].split(ki, factor=1)
s[CC].reorder(ko, kt, ki, yo, xo)
s[AA].compute_at(s[CC], ko)
s[BB].compute_at(s[CC], ko)
s[CC].unroll(kt)
s[AL].compute_at(s[CC], kt)
s[BL].compute_at(s[CC], kt)
ty, xi = s[AA].split(s[AA].op.axis[0], nparts=num_thread)
_, xi = s[AA].split(s[AA].op.axis[1], factor=num_thread * 4)
    tx, xi = s[AA].split(xi, nparts=num_thread)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
s[AA].vectorize(xi)
ty, xi = s[BB].split(s[BB].op.axis[0], nparts=num_thread)
_, xi = s[BB].split(s[BB].op.axis[1], factor=num_thread * 4)
tx, xi = s[BB].split(xi, nparts=num_thread)
s[BB].bind(ty, thread_y)
s[BB].bind(tx, thread_x)
s[BB].vectorize(xi)
s[AA].double_buffer()
s[BB].double_buffer()
def check_device(device):
dev = tvm.device(device, 0)
if not dev.exist:
print("Skip because %s is not enabled" % device)
return
print("Device %s" % device)
f = tvm.build(s, [A, B, C], device)
n, m, l = nn, nn, nn
a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(m, l)).astype(B.dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
for i in range(2):
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np.dot(b_np.T, a_np), rtol=1e-5)
num_flops = 2 * nn * nn * nn
num_runs = 10
timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
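        # num_flops / (t * 1e3) / 1e6 simplifies to num_flops / t / 1e9, i.e. GFLOP/s.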
print("average time cost of %d runs = %g ms, %g GFLOPS." % (num_runs, t * 1e3, GFLOPS))
for device in ["cuda", "opencl", "rocm", "nvptx", "vulkan"]:
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "cuda"}}
):
check_device(device)
if __name__ == "__main__":
    test_gemm()
"""Example code to perform int8 GEMM"""
import logging
import sys
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
DO_TUNING = True
PRETUNED_INDEX = 75333
intrin_dp4a = dp4a("local", "local", "local")
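# dp4a tensorizes the innermost reduction onto NVIDIA's DP4A instruction,
# which computes a 4-element int8 dot product accumulated into int32; the
# three arguments are the storage scopes of the two inputs and the output.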
@autotvm.template
def gemm_int8(n, m, l):
A = te.placeholder((n, l), name="A", dtype="int8")
B = te.placeholder((m, l), name="B", dtype="int8")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
name="C",
)
cfg = autotvm.get_config()
s = te.create_schedule(C.op)
y, x = C.op.axis
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
k = CC.op.reduce_axis[0]
cfg.define_split(
"tile_k",
cfg.axis(k),
num_outputs=3,
filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
)
ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].tensorize(ki, intrin_dp4a)
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
def block_size_filter(entity):
return (
entity.size[0] * 2 >= entity.size[1] * 2
and entity.size[1] <= 16
and entity.size[3] <= 4
)
cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, te.thread_axis("vthread"))
s[C].bind(txz, te.thread_axis("vthread"))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
    for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
_, xi = s[stage].split(stage.op.axis[1], factor=4)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob("storage_align", [16, 48])
for stage in [AA, BB]:
s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
_, xi = s[stage].split(xi, factor=16)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
cfg.define_knob("auto_unroll_max_step", [512, 1500])
s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(by, "unroll_explicit", False)
cfg.add_flop(n * m * l * 2)
return s, [A, B, C]
if __name__ == "__main__":
N = 2048
n = m = l = N
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
)
log_name = "gemm_int8.log"
if DO_TUNING:
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(
n_trial=1000,
measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)],
)
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)
else:
config = task.config_space.get(PRETUNED_INDEX)
dispatch_context = autotvm.task.ApplyConfig(config)
print("Using pretuned config:")
print(config)
with dispatch_context:
with tvm.target.Target("cuda"): |
s, arg_bufs = gemm_int8(n, m, l)
f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")
dev = tvm.device("cuda", 0)
a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
)
num_ops = 2 * l * m * n
num_runs = 1000
timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
t = timer_f(a, b, c).mean
GOPS = num_ops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS)) |
import os |
import tvm
from tvm |
import te
from tvm.contrib |
import nvcc |
import numpy as np
from tvm |
import topi
TASK = "reduce_map"
USE_MANUAL_CODE = False
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_reduce_map(in_shape, axis, keepdims, type="sum", test_id=0):
global TASK
A = te.placeholder(shape=in_shape, name="A")
if type == "sum":
TASK = "sum_map_id%d" % test_id
B = topi.sum(A, axis=axis, keepdims=keepdims)
elif type == "max":
TASK = "max_map_id%d" % test_id
B = topi.max(A, axis=axis, keepdims=keepdims)
elif type == "min":
TASK = "min_map_id%d" % test_id
B = topi.min(A, axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
s = topi.cuda.schedule_reduce(B)
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 16,
}
}
):
fcuda = tvm.build(s, [A, B], "cuda", name="sum")
in_npy = np.random.normal(size=in_shape).astype(np.float32)
if type == "sum":
out_npy = in_npy.sum(axis=axis, keepdims=keepdims)
elif type == "max":
out_npy = in_npy.max(axis=axis, keepdims=keepdims)
elif type == "min":
out_npy = in_npy.min(axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
data_tvm = tvm.nd.array(in_npy, device=tvm.cuda())
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=tvm.cuda())
for _ in range(2):
fcuda(data_tvm, out_tvm)
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, rtol=4e-4, atol=4e-4)
if __name__ == "__main__":
test_reduce_map(
in_shape=(128, 24, 128, 24), axis=(1, 2, 3), keepdims=True, type="sum", test_id=0
)
    test_reduce_map(
        in_shape=(128, 24 * 128 * 24), axis=(1,), keepdims=False, type="max", test_id=1
    )
test_reduce_map(in_shape=(32, 128, 24), axis=None, keepdims=True, type="sum", test_id=2)
    test_reduce_map(in_shape=(128, 24, 128, 24), axis=(0, 2), keepdims=False, type="min", test_id=3)
"""LSTM Example, still a work in progress..."""
import tvm
from tvm import te