import android.content.Intent;
public class MainActivity extends AppCompatActivity {
public static final int HANDLER_RESTART_DELAY = 5000;
private void showDialog(String title, String msg) {
AlertDialog.Builder builder = new AlertDialog.Builder(this);
builder.setTitle(title);
builder.setMessage(msg);
builder.setCancelable(true);
builder.setNeutralButton(android.R.string.ok,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int id) {
dialog.cancel();
}
});
builder.create().show();
}
public Intent updateRPCPrefs() {
System.err.println("updating preferences...");
EditText edProxyAddress = findViewById(R.id.input_address);
EditText edProxyPort = findViewById(R.id.input_port);
EditText edAppKey = findViewById(R.id.input_key);
SwitchCompat inputSwitch = findViewById(R.id.switch_persistent);
final String proxyHost = edProxyAddress.getText().toString();
final int proxyPort = Integer.parseInt(edProxyPort.getText().toString());
final String key = edAppKey.getText().toString();
final boolean isChecked = inputSwitch.isChecked();
SharedPreferences pref = getApplicationContext().getSharedPreferences("RPCProxyPreference", Context.MODE_PRIVATE);
SharedPreferences.Editor editor = pref.edit();
editor.putString("input_address", proxyHost);
editor.putString("input_port", edProxyPort.getText().toString());
editor.putString("input_key", key);
editor.putBoolean("input_switch", isChecked);
editor.commit();
Intent intent = new Intent(this, RPCActivity.class);
intent.putExtra("host", proxyHost);
intent.putExtra("port", proxyPort);
intent.putExtra("key", key);
return intent;
}
private void setupRelaunch() {
final Context context = this;
final SwitchCompat switchPersistent = findViewById(R.id.switch_persistent);
final Runnable rPCStarter = new Runnable() {
public void run() {
if (switchPersistent.isChecked()) {
System.err.println("relaunching RPC activity...");
Intent intent = ((MainActivity) context).updateRPCPrefs();
startActivity(intent);
}
}
};
Handler handler = new Handler(Looper.getMainLooper());
handler.postDelayed(rPCStarter, HANDLER_RESTART_DELAY);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
Toolbar toolbar = findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
final Context context = this;
SwitchCompat switchPersistent = findViewById(R.id.switch_persistent);
switchPersistent.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
if (isChecked) {
System.err.println("automatic RPC restart enabled...");
updateRPCPrefs();
setupRelaunch();
} else {
System.err.println("automatic RPC restart disabled...");
updateRPCPrefs();
}
}
});
enableInputView(true);
}
@Override
protected void onResume() {
System.err.println("MainActivity onResume...");
enableInputView(true);
setupRelaunch();
super.onResume();
}
@Override
protected void onDestroy() {
super.onDestroy();
}
private void enableInputView(boolean enable) {
EditText edProxyAddress = findViewById(R.id.input_address);
EditText edProxyPort = findViewById(R.id.input_port);
EditText edAppKey = findViewById(R.id.input_key);
SwitchCompat input_switch = findViewById(R.id.switch_persistent);
edProxyAddress.setEnabled(enable);
edProxyPort.setEnabled(enable);
edAppKey.setEnabled(enable);
if (enable) {
SharedPreferences pref = getApplicationContext().getSharedPreferences("RPCProxyPreference", Context.MODE_PRIVATE);
String inputAddress = pref.getString("input_address", null);
if (null != inputAddress)
edProxyAddress.setText(inputAddress);
String inputPort = pref.getString("input_port", null);
if (null != inputPort)
edProxyPort.setText(inputPort);
String inputKey = pref.getString("input_key", null);
if (null != inputKey)
edAppKey.setText(inputKey);
boolean isChecked = pref.getBoolean("input_switch", false);
input_switch.setChecked(isChecked);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.tvmrpc;
import android.os.Bundle;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.widget.Button;
import android.view.View;
public class RPCActivity extends AppCompatActivity {
private RPCProcessor tvmServerWorker;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_rpc);
Button stopRPC = findViewById(R.id.button_stop_rpc);
stopRPC.setOnClickListener(new View.OnClickListener() {
public void onClick(View v) {
System.err.println(tvmServerWorker == null);
if (tvmServerWorker != null) {
tvmServerWorker.disconnect();
}
finish();
System.exit(0);
}
});
System.err.println("rpc activity onCreate...");
Intent intent = getIntent();
String host = intent.getStringExtra("host");
int port = intent.getIntExtra("port", 9090);
String key = intent.getStringExtra("key");
tvmServerWorker = new RPCProcessor(this);
tvmServerWorker.setDaemon(true);
tvmServerWorker.start();
tvmServerWorker.connect(host, port, key);
}
@Override
protected void onDestroy() {
System.err.println("rpc activity onDestroy");
tvmServerWorker.disconnect();
super.onDestroy();
android.os.Process.killProcess(android.os.Process.myPid());
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.tvmrpc;
import android.app.Activity;
import org.apache.tvm.rpc.RPCWatchdog;
/**
* Watchdog for Android RPC.
*/
public class RPCAndroidWatchdog extends RPCWatchdog {
public Activity rpc_activity = null;
public RPCAndroidWatchdog(Activity activity) {
super();
rpc_activity = activity;
}
/**
* Method to non-destructively terminate the running thread on Android
*/
@Override protected void terminate() {
rpc_activity.finish();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.tvmrpc;
import android.app.Activity;
import android.os.ParcelFileDescriptor;
import java.net.Socket;
import org.apache.tvm.rpc.ConnectTrackerServerProcessor;
/**
* Connect to RPC proxy and deal with requests.
*/
class RPCProcessor extends Thread {
private String host;
private int port;
private String key;
private boolean running = false;
private long startTime;
private ConnectTrackerServerProcessor currProcessor;
private boolean first = true;
private Activity rpc_activity = null;
public RPCProcessor(Activity activity) {
super();
rpc_activity = activity;
}
@Override public void run() {
RPCAndroidWatchdog watchdog = new RPCAndroidWatchdog(rpc_activity);
watchdog.start();
while (true) {
synchronized (this) {
currProcessor = null;
while (!running) {
try {
this.wait();
} catch (InterruptedException e) {
}
}
try {
currProcessor = new ConnectTrackerServerProcessor(host, port, key, watchdog);
} catch (Throwable e) {
e.printStackTrace();
System.exit(0);
}
}
if (currProcessor != null)
currProcessor.run();
watchdog.finishTimeout();
}
}
/**
* Disconnect from the proxy server.
*/
synchronized void disconnect() {
if (running) {
running = false;
if (currProcessor != null) {
currProcessor.terminate();
}
}
}
/**
* Start rpc processor and connect to the proxy server.
* @param host proxy server host.
* @param port proxy server port.
* @param key proxy server key.
*/
synchronized void connect(String host, int port, String key) {
this.host = host;
this.port = port;
this.key = key;
running = true;
this.notify();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm_runtime.h
* \brief Pack all tvm runtime source files
*/
/* Enable custom logging - this will cause TVM to use a custom implementation
* of tvm::runtime::detail::LogMessage. We use this to pass TVM log messages to
* Android logcat.
*/
namespace tvm {
namespace runtime {
namespace detail {
[[noreturn]] void LogFatalImpl(const std::string& file, int lineno, const std::string& message) {
std::string m = file + ":" + std::to_string(lineno) + ": " + message;
__android_log_write(ANDROID_LOG_FATAL, "TVM_RUNTIME", m.c_str());
throw InternalError(file, lineno, message);
}
void LogMessageImpl(const std::string& file, int lineno, int level, const std::string& message) {
std::string m = file + ":" + std::to_string(lineno) + ": " + message;
__android_log_write(ANDROID_LOG_DEBUG + level, "TVM_RUNTIME", m.c_str());
}
}
}
}
"""Testcode for Android RPC.
To use it, start an RPC tracker with "python -m tvm.exec.rpc_tracker".
Use the tracker's address and port when configuring the RPC app.
Use "android" as the key if you wish to avoid modifying this script.
""" |
import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import utils, ndk
import numpy as np
tracker_host = os.environ["TVM_TRACKER_HOST"]
tracker_port = int(os.environ["TVM_TRACKER_PORT"])
key = "android"
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch
test_opencl = False
test_vulkan = False
def test_rpc_module():
n = tvm.runtime.convert(1024)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
a_np = np.random.uniform(size=1024).astype(A.dtype)
temp = utils.tempdir()
tracker = rpc.connect_tracker(tracker_host, tracker_port)
remote = tracker.request(key, priority=0, session_timeout=60)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].parallel(xi)
s[B].pragma(xo, "parallel_launch_point")
s[B].pragma(xi, "parallel_barrier_when_finish")
f = tvm.build(s, [A, B], target, name="myadd_cpu")
path_dso_cpu = temp.relpath("cpu_lib.so")
f.export_library(path_dso_cpu, ndk.create_shared)
print("Run CPU test ...")
dev = remote.cpu(0)
remote.upload(path_dso_cpu)
f2 = remote.load_module("cpu_lib.so")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
time_f = f2.time_evaluator(f2.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op\n" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
if test_opencl:
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xi, te.thread_axis("threadIdx.x"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
f = tvm.build(s, [A, B], tvm.target.Target("opencl", host=target), name="myadd")
path_dso_cl = temp.relpath("dev_lib_cl.so")
f.export_library(path_dso_cl, ndk.create_shared)
print("Run GPU(OpenCL Flavor) test ...")
dev = remote.cl(0)
remote.upload(path_dso_cl)
f1 = remote.load_module("dev_lib_cl.so")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
time_f = f1.time_evaluator(f1.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op\n" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
if test_vulkan:
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xi, te.thread_axis("threadIdx.x"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
f = tvm.build(s, [A, B], tvm.target.Target("vulkan", host=target), name="myadd")
path_dso_vulkan = temp.relpath("dev_lib_vulkan.so")
f.export_library(path_dso_vulkan, ndk.create_shared)
print("Run GPU(Vulkan Flavor) test ...")
dev = remote.vulkan(0)
remote.upload(path_dso_vulkan)
f1 = remote.load_module("dev_lib_vulkan.so")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
time_f = f1.time_evaluator(f1.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op\n" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
if __name__ == "__main__":
test_rpc_module()
"""Benchmark script for ImageNet models on ARM CPU.
see README.md for the usage and results of this script.
""" |
import argparse
import numpy as np
import tvm
from tvm import te
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
from tvm import relay
from util import get_network, print_progress
def evaluate_network(network, target, target_host, repeat):
tracker = tvm.rpc.connect_tracker(args.host, args.port)
remote = tracker.request(args.rpc_key)
print_progress(network)
net, params, input_shape, output_shape = get_network(network, batch_size=1)
print_progress("%-20s building..." % network)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(net, target=tvm.target.Target(target, host=target_host), params=params)
tmp = tempdir()
if "android" in str(target):
from tvm.contrib import ndk
filename = "%s.so" % network
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "%s.tar" % network
lib.export_library(tmp.relpath(filename))
print_progress("%-20s uploading..." % network)
dev = remote.device(str(target), 0)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
module = runtime.GraphModule(rlib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
print_progress("%-20s evaluating..." % network)
ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat)
prof_res = np.array(ftimer().results) * 1000
print(
"%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--network",
type=str,
choices=[
"resnet-18",
"resnet-34",
"resnet-50",
"vgg-16",
"vgg-19",
"densenet-121",
"inception_v3",
"mobilenet",
"squeezenet_v1.0",
"squeezenet_v1.1",
],
help="The name of neural network",
)
parser.add_argument(
"--model",
type=str,
choices=["rk3399", "mate10", "mate10pro", "p20", "p20pro", "pixel2", "rasp3b", "pynq"],
default="rk3399",
help="The model of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.",
)
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=9190)
parser.add_argument("--rpc-key", type=str, required=True)
parser.add_argument("--repeat", type=int, default=10)
args = parser.parse_args()
dtype = "float32"
if args.network is None:
networks = ["squeezenet_v1.1", |
"mobilenet", "resnet-18", "vgg-16"]
else:
networks = [args.network]
target = tvm.target.arm_cpu(model=args.model)
target_host = None
print("--------------------------------------------------")
print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
print("--------------------------------------------------")
for network in networks:
evaluate_network(network, target, target_host, args.repeat)
"""Benchmark script for ImageNet models on GPU.
see README.md for the usage and results of this script.
""" |
import argparse
import threading
import numpy as np
import tvm
from tvm import te
import tvm.contrib.graph_executor as runtime
from tvm import relay
from util import get_network
def benchmark(network, target):
net, params, input_shape, output_shape = get_network(network, batch_size=1)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(net, target=target, params=params)
dev = tvm.device(str(target), 0)
module = runtime.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
ftimer = module.module.time_evaluator("run", dev, number=1, repeat=args.repeat)
prof_res = np.array(ftimer().results) * 1000
print(
"%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--network",
type=str,
choices=[
"resnet-18",
"resnet-34",
"resnet-50",
"vgg-16",
"vgg-19",
"densenet-121",
"inception_v3",
"mobilenet",
"squeezenet_v1.0",
"squeezenet_v1.1",
],
help="The name of neural network",
)
parser.add_argument(
"--device",
type=str,
choices=["amd_apu"],
default="amd_apu",
help="The name of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.",
)
parser.add_argument(
"--model",
type=str,
choices=["1080ti", "titanx", "tx2", "gfx900", "v1000"],
default="1080ti",
help="The model of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.",
)
parser.add_argument("--repeat", type=int, default=600)
parser.add_argument(
"--target",
type=str,
choices=["cuda", "opencl", "rocm", "nvptx", "metal", "vulkan"],
default="cuda",
help="The tvm compilation target",
)
parser.add_argument("--thread", type=int, default=1, help="The number of threads to be run.")
args = parser.parse_args()
dtype = "float32"
if args.network is None:
networks = ["resnet-50", "mobilenet", "vgg-19", "inception_v3"]
else:
networks = [args.network]
target = tvm.target.Target("%s -device=%s -model=%s" % (args.target, args.device, args.model))
print("--------------------------------------------------")
print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
print("--------------------------------------------------")
for network in networks:
if args.thread == 1:
benchmark(network, target)
else:
threads = list()
for n in range(args.thread):
thread = threading.Thread(
target=benchmark, args=([network, target]), name="thread%d" % n
)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
"""Benchmark script for ImageNet models on mobile GPU.
see README.md for the usage and results of this script.
""" |
import argparse
import numpy as np
import tvm
from tvm import te
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
from tvm import relay
from util import get_network, print_progress
def evaluate_network(network, target, target_host, dtype, repeat):
tracker = tvm.rpc.connect_tracker(args.host, args.port)
remote = tracker.request(args.rpc_key)
print_progress(network)
net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)
print_progress("%-20s building..." % network)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(net, target=tvm.target.Target(target, host=target_host), params=params)
tmp = tempdir()
if "android" in str(target) or "android" in str(target_host):
from tvm.contrib import ndk
filename = "%s.so" % network
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "%s.tar" % network
lib.export_library(tmp.relpath(filename))
print_progress("%-20s uploading..." % network)
dev = remote.device(str(target), 0)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
module = runtime.GraphModule(rlib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
print_progress("%-20s evaluating..." % network)
ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat)
prof_res = np.array(ftimer().results) * 1000
print(
"%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--network",
type=str,
choices=[
"resnet-18",
"resnet-34",
"resnet-50",
"vgg-16",
"vgg-19",
"densenet-121",
"inception_v3",
"mobilenet",
"squeezenet_v1.0",
"squeezenet_v1.1",
],
help="The name of neural network",
)
parser.add_argument(
"--model",
type=str,
choices=["rk3399"],
default="rk3399",
help="The model of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.",
)
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=9190)
parser.add_argument("--rpc-key", type=str, required=True)
parser.add_argument("--repeat", type=int, default=30)
parser.add_argument("--dtype", type=str, default="float32")
args = parser.parse_args()
if args.network is None:
networks = ["squeezenet_v1.1", "mobilenet", "resnet-18", |
"vgg-16"]
else:
networks = [args.network]
target = tvm.target.mali(model=args.model)
target_host = tvm.target.arm_cpu(model=args.model)
print("--------------------------------------------------")
print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
print("--------------------------------------------------")
for network in networks:
evaluate_network(network, target, target_host, args.dtype, args.repeat)
"""Utility for benchmark""" |
import sys
import tvm
from tvm import relay
from tvm.relay import testing
def get_network(name, batch_size, dtype="float32"):
"""Get the symbol definition and random weight of a network
Parameters
----------
name: str
The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ...
batch_size: int
batch size
dtype: str
Data type
Returns
-------
net: tvm.IRModule
The relay function of network definition
params: dict
The random parameters for benchmark
input_shape: tuple
The shape of input tensor
output_shape: tuple
The shape of output tensor
"""
input_shape = (batch_size, 3, 224, 224)
output_shape = (batch_size, 1000)
if name == "mobilenet":
net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299)
net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif "resnet" in name:
n_layer = int(name.split("-")[1])
net, params = testing.resnet.get_workload(
num_layers=n_layer, batch_size=batch_size, dtype=dtype
)
elif "vgg" in name:
n_layer = int(name.split("-")[1])
net, params = testing.vgg.get_workload(
num_layers=n_layer, batch_size=batch_size, dtype=dtype
)
elif "densenet" in name:
n_layer = int(name.split("-")[1])
net, params = testing.densenet.get_workload(
densenet_size=n_layer, batch_size=batch_size, dtype=dtype
)
elif "squeezenet" in name:
version = name.split("_v")[1]
net, params = testing.squeezenet.get_workload(
batch_size=batch_size, version=version, dtype=dtype
)
elif name == "mxnet":
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("resnet18_v1", pretrained=True)
net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = net["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
net = tvm.IRModule.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
return net, params, input_shape, output_shape
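# Usage sketch (assumed values): fetch a ResNet-18 workload for benchmarking.
#   net, params, input_shape, output_shape = get_network("resnet-18", batch_size=1)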
def print_progress(msg):
"""print progress message
Parameters
----------
msg: str
The message to print
"""
sys.stdout.write(msg + "\r")
sys.stdout.flush()
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#define _GNU_SOURCE
#include "backtrace.h"
#include <dlfcn.h>
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
const char* g_argv0 = NULL;
void tvm_platform_abort_backtrace() {
void* trace[200];
int nptrs = backtrace(trace, sizeof(trace) / sizeof(void*));
fprintf(stderr, "backtrace: %d\n", nptrs);
if (nptrs < 0) {
perror("backtracing");
} else {
backtrace_symbols_fd(trace, nptrs, STDOUT_FILENO);
char cmd_buf[1024];
for (int i = 0; i < nptrs; i++) {
Dl_info info;
if (dladdr(trace[i], &info)) {
fprintf(stderr, "symbol %d: %s %s %p (%p)\n", i, info.dli_sname, info.dli_fname,
info.dli_fbase, (void*)(trace[i] - info.dli_fbase));
snprintf(cmd_buf, sizeof(cmd_buf), "addr2line --exe=%s -p -i -a -f %p", g_argv0,
(void*)(trace[i] - info.dli_fbase));
int result = system(cmd_buf);
if (result < 0) {
perror("invoking backtrace command");
}
} else {
fprintf(stderr, "symbol %d: %p (unmapped)\n", i, trace[i]);
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifdef __cplusplus
extern "C" {
#endif
extern const char* g_argv0;
void tvm_platform_abort_backtrace(void);
#ifdef __cplusplus
}
#endif
"""Creates a simple TVM modules.""" |
import argparse
import os
from tvm import relay
import tvm
from tvm import runtime as tvm_runtime
import logging
from tvm.relay.backend import Runtime
from tvm.contrib import cc as _cc
RUNTIMES = [
(Runtime("crt", {"system-lib": True}), "{name}_c.{ext}"),
(Runtime("cpp", {"system-lib": True}), "{name}_cpp.{ext}"),
]
def build_module(opts):
dshape = (1, 3, 224, 224)
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
func = mod["main"]
func = relay.Function(
func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
)
for runtime, file_format_str in RUNTIMES:
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
graph, lib, params = relay.build(func, "llvm", runtime=runtime, params=params)
build_dir = os.path.abspath(opts.out_dir)
if not os.path.isdir(build_dir):
os.makedirs(build_dir)
ext = "tar" if str(runtime) == "crt" else "o"
lib_file_name = os.path.join(build_dir, file_format_str.format(name="model", ext=ext))
if str(runtime) == "crt":
lib.export_library(lib_file_name)
else:
lib.save(lib_file_name)
with open(
os.path.join(build_dir, file_format_str.format(name="graph", ext="json")), "w"
) as f_graph_json:
f_graph_json.write(graph)
with open(
os.path.join(build_dir, file_format_str.format(name="params", ext="bin")), "wb"
) as f_params:
f_params.write(tvm_runtime.save_param_dict(params))
def build_test_module(opts):
import numpy as np
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
for runtime, file_format_str in RUNTIMES:
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
graph, lib, lowered_params = relay.build(
tvm.IRModule.from_expr(func),
"llvm",
runtime=runtime,
params=params,
)
build_dir = os.path.abspath(opts.out_dir)
if not os.path.isdir(build_dir):
os.makedirs(build_dir)
ext = "tar" if str(runtime) == "crt" else "o"
lib_file_name = os.path.join(build_dir, file_format_str.format(name="test_model", ext=ext))
if str(runtime) == "crt":
lib.export_library(lib_file_name)
else:
lib.save(lib_file_name)
with open(
os.path.join(build_dir, file_format_str.format(name="test_graph", ext="json")), "w"
) as f_graph_json:
f_graph_json.write(graph)
with open(
os.path.join(build_dir, file_format_str.format(name="test_params", ext="bin")), "wb"
) as f_params:
f_params.write(tvm_runtime.save_param_dict(lowered_params))
with open(
os.path.join(build_dir, file_format_str.format(name="test_data", ext="bin")), "wb"
) as fp:
fp.write(x_data.astype(np.float32).tobytes())
x_output = x_data + y_data
with open(
os.path.join(build_dir, file_format_str.format(name="test_output", ext="bin")), "wb"
) as fp:
fp.write(x_output.astype(np.float32).tobytes())
def build_inputs(opts):
from tvm.contrib import download
from PIL import Image
import numpy as np
build_dir = os.path.abspath(opts.out_dir)
image_url = "https:
image_fn = os.path.join(build_dir, "cat.png")
download.download(image_url, image_fn)
image = Image.open(image_fn).resize((224, 224))
def transform_image(image):
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
print("x", x.shape)
with open(os.path.join(build_dir, "cat.bin"), "wb") as fp:
fp.write(x.astype(np.float32).tobytes())
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out-dir", default=".")
parser.add_argument("-t", "--test", action="store_true")
opts = parser.parse_args()
if opts.test:
build_test_module(opts)
else:
build_module(opts)
build_inputs(opts)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
static uint8_t g_crt_memory[CRT_MEMORY_NUM_PAGES * (1 << CRT_MEMORY_PAGE_SIZE_LOG2)];
static MemoryManagerInterface* g_memory_manager;
/*! \brief macro to do C API call */
#define TVM_CCALL(func) \
do { \
int ret = (func); \
if (ret != 0) { \
fprintf(stderr, "%s: %d: error: %s\n", __FILE__, __LINE__, TVMGetLastError()); \
exit(ret); \
} \
} while (0)
TVM_DLL void* tvm_runtime_create(const char* json_data, const char* params_data,
const uint64_t params_size, const char* argv0) {
g_argv0 = argv0;
int64_t device_type = kDLCPU;
int64_t device_id = 0;
TVMByteArray params;
params.data = params_data;
params.size = params_size;
DLDevice dev;
dev.device_type = (DLDeviceType)device_type;
dev.device_id = device_id;
TVM_CCALL(PageMemoryManagerCreate(&g_memory_manager, g_crt_memory, sizeof(g_crt_memory),
CRT_MEMORY_PAGE_SIZE_LOG2));
TVM_CCALL(TVMInitializeRuntime());
TVMPackedFunc pf;
TVMArgs args = TVMArgs_Create(NULL, NULL, 0);
TVM_CCALL(TVMPackedFunc_InitGlobalFunc(&pf, "runtime.SystemLib", &args));
TVM_CCALL(TVMPackedFunc_Call(&pf));
TVMModuleHandle mod_syslib = TVMArgs_AsModuleHandle(&pf.ret_value, 0);
TVMGraphExecutor* graph_executor = NULL;
TVM_CCALL(TVMGraphExecutor_Create(json_data, mod_syslib, &dev, &graph_executor));
TVM_CCALL(TVMGraphExecutor_LoadParams(graph_executor, params.data, params.size));
return graph_executor;
}
TVM_DLL void tvm_runtime_destroy(void* executor) {
TVMGraphExecutor_Release((TVMGraphExecutor**)&executor);
}
TVM_DLL void tvm_runtime_set_input(void* executor, const char* name, DLTensor* tensor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_SetInput(graph_executor, name, tensor);
}
TVM_DLL void tvm_runtime_run(void* executor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_Run(graph_executor);
}
TVM_DLL void tvm_runtime_get_output(void* executor, int32_t index, DLTensor* tensor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_GetOutput(graph_executor, index, tensor);
}
void TVMLogf(const char* msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stderr, msg, args);
va_end(args);
}
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
fprintf(stderr, "TVMPlatformAbort: %d\n", error_code);
tvm_platform_abort_backtrace();
exit(-1);
}
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
return g_memory_manager->Allocate(g_memory_manager, num_bytes, dev, out_ptr);
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return g_memory_manager->Free(g_memory_manager, ptr, dev);
}
tvm_crt_error_t TVMPlatformTimerStart() { return kTvmErrorFunctionCallNotImplemented; }
tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds) {
return kTvmErrorFunctionCallNotImplemented;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_APPS_BUNDLE_DEPLOY_BUNDLE_H_
#define TVM_APPS_BUNDLE_DEPLOY_BUNDLE_H_
#include <tvm/runtime/c_runtime_api.h>
TVM_DLL void* tvm_runtime_create(const char* json_data, const char* params_data,
const uint64_t params_size, const char* argv);
TVM_DLL void tvm_runtime_destroy(void* runtime);
TVM_DLL void tvm_runtime_set_input(void* runtime, const char* name, DLTensor* tensor);
TVM_DLL void tvm_runtime_run(void* runtime);
TVM_DLL void tvm_runtime_get_output(void* runtime, int32_t index, DLTensor* tensor);
#endif /* TVM_APPS_BUNDLE_DEPLOY_BUNDLE_H_ */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
static uint8_t g_crt_memory[CRT_MEMORY_NUM_PAGES * (1 << CRT_MEMORY_PAGE_SIZE_LOG2)];
static MemoryManagerInterface* g_memory_manager;
/*! \brief macro to do C API call */
#define TVM_CCALL(func) \
do { \
tvm_crt_error_t ret = (func); \
if (ret != kTvmErrorNoError) { \
fprintf(stderr, "%s: %d: error: %s\n", __FILE__, __LINE__, TVMGetLastError()); \
exit(ret); \
} \
} while (0)
TVM_DLL void* tvm_runtime_create(const char* json_data, const char* params_data,
const uint64_t params_size, const char* argv0) {
g_argv0 = argv0;
int64_t device_type = kDLCPU;
int64_t device_id = 0;
TVMByteArray params;
params.data = params_data;
params.size = params_size;
DLDevice dev;
dev.device_type = (DLDeviceType)device_type;
dev.device_id = device_id;
TVM_CCALL(PageMemoryManagerCreate(&g_memory_manager, g_crt_memory, sizeof(g_crt_memory),
CRT_MEMORY_PAGE_SIZE_LOG2));
TVM_CCALL(TVMInitializeRuntime());
TVMPackedFunc pf;
TVMArgs args = TVMArgs_Create(NULL, NULL, 0);
TVM_CCALL(TVMPackedFunc_InitGlobalFunc(&pf, "runtime.SystemLib", &args));
TVM_CCALL(TVMPackedFunc_Call(&pf));
TVMModuleHandle mod_syslib = TVMArgs_AsModuleHandle(&pf.ret_value, 0);
TVMGraphExecutor* graph_executor = NULL;
TVM_CCALL(TVMGraphExecutor_Create(json_data, mod_syslib, &dev, &graph_executor));
TVM_CCALL(TVMGraphExecutor_LoadParams(graph_executor, params.data, params.size));
return graph_executor;
}
TVM_DLL void tvm_runtime_destroy(void* executor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_Release(&graph_executor);
}
TVM_DLL void tvm_runtime_set_input(void* executor, const char* name, DLTensor* tensor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_SetInput(graph_executor, name, tensor);
}
TVM_DLL void tvm_runtime_run(void* executor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_Run(graph_executor);
}
TVM_DLL void tvm_runtime_get_output(void* executor, int32_t index, DLTensor* tensor) {
TVMGraphExecutor* graph_executor = (TVMGraphExecutor*)executor;
TVMGraphExecutor_GetOutput(graph_executor, index, tensor);
}
void TVMLogf(const char* msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stderr, msg, args);
va_end(args);
}
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
fprintf(stderr, "TVMPlatformAbort: %d\n", error_code);
tvm_platform_abort_backtrace();
exit(-1);
}
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
return g_memory_manager->Allocate(g_memory_manager, num_bytes, dev, out_ptr);
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return g_memory_manager->Free(g_memory_manager, ptr, dev);
}
tvm_crt_error_t TVMPlatformTimerStart() { return kTvmErrorFunctionCallNotImplemented; }
tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds) {
return kTvmErrorFunctionCallNotImplemented;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file apps/bundle_deploy/crt_config.h
* \brief CRT configuration for bundle_deploy app.
*/
#ifndef TVM_RUNTIME_CRT_CONFIG_H_
#define TVM_RUNTIME_CRT_CONFIG_H_
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
/*! Support low-level debugging in MISRA-C runtime */
#define TVM_CRT_DEBUG 0
/*! Maximum supported dimension in NDArray */
#define TVM_CRT_MAX_NDIM 6
/*! Maximum supported arguments in generated functions */
#define TVM_CRT_MAX_ARGS 10
/*! Maximum supported string length in dltype, e.g. "int8", "int16", "float32" */
#define TVM_CRT_MAX_STRLEN_DLTYPE 10
/*! Maximum supported string length in function names */
#define TVM_CRT_MAX_STRLEN_FUNCTION_NAME 120
/*! Maximum supported string length in parameter names */
#define TVM_CRT_MAX_STRLEN_PARAM_NAME 80
/*! Maximum number of registered modules. */
#define TVM_CRT_MAX_REGISTERED_MODULES 2
/*! Size of the global function registry, in bytes. */
#define TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES 512
/*! Maximum packet size, in bytes, including the length header. */
#define TVM_CRT_MAX_PACKET_SIZE_BYTES 512
#endif // TVM_RUNTIME_CRT_CONFIG_H_
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern const char build_graph_c_json[];
extern unsigned int build_graph_c_json_len;
extern const char build_params_c_bin[];
extern unsigned int build_params_c_bin_len;
int main(int argc, char** argv) {
assert(argc == 2 && "Usage: demo_static <cat.bin>");
char* json_data = (char*)(build_graph_c_json);
char* params_data = (char*)(build_params_c_bin);
uint64_t params_size = build_params_c_bin_len;
struct timeval t0, t1, t2, t3, t4, t5;
gettimeofday(&t0, 0);
void* handle = tvm_runtime_create(json_data, params_data, params_size, argv[0]);
gettimeofday(&t1, 0);
float input_storage[1 * 3 * 224 * 224];
FILE* fp = fopen(argv[1], "rb");
(void)fread(input_storage, 3 * 224 * 224, 4, fp);
fclose(fp);
DLTensor input;
input.data = input_storage;
DLDevice dev = {kDLCPU, 0};
input.device = dev;
input.ndim = 4;
DLDataType dtype = {kDLFloat, 32, 1};
input.dtype = dtype;
int64_t shape[4] = {1, 3, 224, 224};
input.shape = shape;
input.strides = NULL;
input.byte_offset = 0;
tvm_runtime_set_input(handle, "data", &input);
gettimeofday(&t2, 0);
tvm_runtime_run(handle);
gettimeofday(&t3, 0);
float output_storage[OUTPUT_LEN];
DLTensor output;
output.data = output_storage;
DLDevice out_dev = {kDLCPU, 0};
output.device = out_dev;
output.ndim = 2;
DLDataType out_dtype = {kDLFloat, 32, 1};
output.dtype = out_dtype;
int64_t out_shape[2] = {1, OUTPUT_LEN};
output.shape = out_shape;
output.strides = NULL;
output.byte_offset = 0;
tvm_runtime_get_output(handle, 0, &output);
gettimeofday(&t4, 0);
float max_iter = -FLT_MAX;
int32_t max_index = -1;
for (int i = 0; i < OUTPUT_LEN; ++i) {
if (output_storage[i] > max_iter) {
max_iter = output_storage[i];
max_index = i;
}
}
tvm_runtime_destroy(handle);
gettimeofday(&t5, 0);
printf("The maximum position in output vector is: %d, with max-value %f.\n", max_index, max_iter);
printf(
"timing: %.2f ms (create), %.2f ms (set_input), %.2f ms (run), "
"%.2f ms (get_output), %.2f ms (destroy)\n",
(t1.tv_sec - t0.tv_sec) * 1000 + (t1.tv_usec - t0.tv_usec) / 1000.f,
(t2.tv_sec - t1.tv_sec) * 1000 + (t2.tv_usec - t1.tv_usec) / 1000.f,
(t3.tv_sec - t2.tv_sec) * 1000 + (t3.tv_usec - t2.tv_usec) / 1000.f,
(t4.tv_sec - t3.tv_sec) * 1000 + (t4.tv_usec - t3.tv_usec) / 1000.f,
(t5.tv_sec - t4.tv_sec) * 1000 + (t5.tv_usec - t4.tv_usec) / 1000.f);
return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
int main(int argc, char** argv) {
assert(argc == 5 && "Usage: test_static <data.bin> <output.bin> <graph.json> <params.bin>");
struct stat st;
char* json_data;
char* params_data;
uint64_t params_size;
FILE* fp = fopen(argv[3], "rb");
stat(argv[3], &st);
json_data = (char*)malloc(st.st_size);
fread(json_data, st.st_size, 1, fp);
fclose(fp);
fp = fopen(argv[4], "rb");
stat(argv[4], &st);
params_data = (char*)malloc(st.st_size);
fread(params_data, st.st_size, 1, fp);
params_size = st.st_size;
fclose(fp);
struct timeval t0, t1, t2, t3, t4, t5;
gettimeofday(&t0, 0);
void* handle = tvm_runtime_create(json_data, params_data, params_size, argv[0]);
gettimeofday(&t1, 0);
float input_storage[10 * 5];
fp = fopen(argv[1], "rb");
fread(input_storage, 10 * 5, 4, fp);
fclose(fp);
float result_storage[10 * 5];
fp = fopen(argv[2], "rb");
fread(result_storage, 10 * 5, 4, fp);
fclose(fp);
DLTensor input;
input.data = input_storage;
DLDevice dev = {kDLCPU, 0};
input.device = dev;
input.ndim = 2;
DLDataType dtype = {kDLFloat, 32, 1};
input.dtype = dtype;
int64_t shape[2] = {10, 5};
input.shape = shape;
input.strides = NULL;
input.byte_offset = 0;
tvm_runtime_set_input(handle, "x", &input);
gettimeofday(&t2, 0);
tvm_runtime_run(handle);
gettimeofday(&t3, 0);
float output_storage[10 * 5];
DLTensor output;
output.data = output_storage;
DLDevice out_dev = {kDLCPU, 0};
output.device = out_dev;
output.ndim = 2;
DLDataType out_dtype = {kDLFloat, 32, 1};
output.dtype = out_dtype;
int64_t out_shape[2] = {10, 5};
output.shape = out_shape;
output.strides = NULL;
output.byte_offset = 0;
tvm_runtime_get_output(handle, 0, &output);
gettimeofday(&t4, 0);
for (int i = 0; i < 10 * 5; ++i) {
assert(fabs(output_storage[i] - result_storage[i]) < 1e-5f);
if (fabs(output_storage[i] - result_storage[i]) >= 1e-5f) {
printf("got %f, expected %f\n", output_storage[i], result_storage[i]);
}
}
tvm_runtime_destroy(handle);
gettimeofday(&t5, 0);
printf(
"timing: %.2f ms (create), %.2f ms (set_input), %.2f ms (run), "
"%.2f ms (get_output), %.2f ms (destroy)\n",
(t1.tv_sec - t0.tv_sec) * 1000 + (t1.tv_usec - t0.tv_usec) / 1000.f,
(t2.tv_sec - t1.tv_sec) * 1000 + (t2.tv_usec - t1.tv_usec) / 1000.f,
(t3.tv_sec - t2.tv_sec) * 1000 + (t3.tv_usec - t2.tv_usec) / 1000.f,
(t4.tv_sec - t3.tv_sec) * 1000 + (t4.tv_usec - t3.tv_usec) / 1000.f,
(t5.tv_sec - t4.tv_sec) * 1000 + (t5.tv_usec - t4.tv_usec) / 1000.f);
free(json_data);
free(params_data);
return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file rpc_env.h
* \brief Server environment of the RPC.
*/
#ifndef TVM_APPS_CPP_RPC_ENV_H_
#define TVM_APPS_CPP_RPC_ENV_H_
#include <tvm/runtime/registry.h>
#include <string>
namespace tvm {
namespace runtime {
/*!
* \brief RPCEnv The RPC Environment parameters for c++ rpc server
*/
struct RPCEnv {
public:
/*!
* \brief Constructor Init The RPC Environment initialize function
*/
RPCEnv(const std::string& work_dir = "");
/*!
* \brief GetPath To get the workpath from packed function
* \param file_name The file name
* \return The full path of file.
*/
std::string GetPath(const std::string& file_name) const;
/*!
* \brief The RPC Environment cleanup function
*/
void CleanUp() const;
private:
/*!
* \brief Holds the environment path.
*/
std::string base_;
}; // RPCEnv
} // namespace runtime
} // namespace tvm
#endif // TVM_APPS_CPP_RPC_ENV_H_
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file rpc_server.h
* \brief RPC Server implementation.
*/
namespace tvm {
namespace runtime {
/*!
* \brief ServerLoopFromChild The Server loop process.
* \param socket The socket information
*/
void ServerLoopFromChild(SOCKET socket);
/*!
* \brief RPCServerCreate Creates the RPC Server.
* \param host The hostname of the server, Default=0.0.0.0
* \param port The port of the RPC, Default=9090
* \param port_end The end search port of the RPC, Default=9099
* \param tracker The address of RPC tracker in host:port format e.g. 10.77.1.234:9190 Default=""
* \param key The key used to identify the device type in tracker. Default=""
* \param custom_addr Custom IP Address to Report to RPC Tracker. Default=""
* \param work_dir Custom work directory. Default=""
* \param silent Whether run in silent mode. Default=True
*/
void RPCServerCreate(std::string host = "", int port = 9090, int port_end = 9099,
std::string tracker_addr = "", std::string key = "",
std::string custom_addr = "", std::string work_dir = "", bool silent = true);
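// Usage sketch (assumed values, comment only): start a server on the default
// port range and register it with a tracker under the key "android".
//   tvm::runtime::RPCServerCreate("0.0.0.0", 9090, 9099, "10.77.1.234:9190",
//                                 "android", /*custom_addr=*/"", /*work_dir=*/"",
//                                 /*silent=*/false);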
}  // namespace runtime
}  // namespace tvm
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file rpc_tracker_client.h
* \brief RPC Tracker client to report resources.
*/
namespace tvm {
namespace runtime {
/*!
* \brief TrackerClient Tracker client class.
* \param tracker The address of RPC tracker in host:port format e.g. 10.77.1.234:9190 Default=""
* \param key The key used to identify the device type in tracker. Default=""
* \param custom_addr Custom IP Address to Report to RPC Tracker. Default=""
*/
class TrackerClient {
public:
/*!
* \brief Constructor.
*/
TrackerClient(const std::string& tracker_addr, const std::string& key,
const std::string& custom_addr, int port)
: tracker_addr_(tracker_addr),
key_(key),
custom_addr_(custom_addr),
port_(port),
gen_(std::random_device{}()),
dis_(0.0, 1.0) {
if (custom_addr_.empty()) {
custom_addr_ = "null";
} else {
custom_addr_ = "\"" + custom_addr_ + "\"";
}
}
/*!
* \brief Destructor.
*/
~TrackerClient() {
Close();
}
/*!
* \brief IsValid Check tracker is valid.
*/
bool IsValid() { return (!tracker_addr_.empty() && !tracker_sock_.IsClosed()); }
/*!
* \brief TryConnect Connect to tracker if the tracker address is valid.
*/
void TryConnect() {
if (!tracker_addr_.empty() && (tracker_sock_.IsClosed())) {
tracker_sock_ = ConnectWithRetry();
int code = kRPCTrackerMagic;
ICHECK_EQ(tracker_sock_.SendAll(&code, sizeof(code)), sizeof(code));
ICHECK_EQ(tracker_sock_.RecvAll(&code, sizeof(code)), sizeof(code));
ICHECK_EQ(code, kRPCTrackerMagic) << tracker_addr_.c_str() << " is not RPC Tracker";
std::ostringstream ss;
ss << "[" << static_cast<int>(TrackerCode::kUpdateInfo) << ", {\"key\": \"server:" << key_
<< "\", \"addr\": [" << custom_addr_ << ", \"" << port_ << "\"]}]";
tracker_sock_.SendBytes(ss.str());
std::string remote_status = tracker_sock_.RecvBytes();
ICHECK_EQ(std::stoi(remote_status), static_cast<int>(TrackerCode::kSuccess));
}
}
/*!
* \brief Close Clean up tracker resources.
*/
void Close() {
if (!tracker_sock_.IsClosed()) {
tracker_sock_.Close();
}
}
/*!
* \brief ReportResourceAndGetKey Report resource to tracker.
* \param port listening port.
* \param matchkey Random match key output.
*/
void ReportResourceAndGetKey(int port, std::string* matchkey) {
if (!tracker_sock_.IsClosed()) {
*matchkey = RandomKey(key_ + ":", old_keyset_);
std::ostringstream ss;
ss << "[" << static_cast<int>(TrackerCode::kPut) << ", \"" << key_ << "\", [" << port
<< ", \"" << *matchkey << "\"], " << custom_addr_ << "]";
tracker_sock_.SendBytes(ss.str());
std::string remote_status = tracker_sock_.RecvBytes();
ICHECK_EQ(std::stoi(remote_status), static_cast<int>(TrackerCode::kSuccess));
} else {
*matchkey = key_;
}
}
/*!
* \brief ReportResourceAndGetKey Report resource to tracker.
* \param listen_sock Listen socket details for select.
* \param port listening port.
* \param ping_period Select wait time.
* \param matchkey Random match key output.
*/
void WaitConnectionAndUpdateKey(support::TCPSocket listen_sock, int port, int ping_period,
std::string* matchkey) {
int unmatch_period_count = 0;
int unmatch_timeout = 4;
while (true) {
if (!tracker_sock_.IsClosed()) {
support::PollHelper poller;
poller.WatchRead(listen_sock.sockfd);
poller.Poll(ping_period * 1000);
if (!poller.CheckRead(listen_sock.sockfd)) {
std::ostringstream ss;
ss << "[" << int(TrackerCode::kGetPendingMatchKeys) << "]";
tracker_sock_.SendBytes(ss.str());
std::string pending_keys = tracker_sock_.RecvBytes();
old_keyset_.insert(*matchkey);
if (pending_keys.find(*matchkey) == std::string::npos) {
unmatch_period_count += 1;
} else {
unmatch_period_count = 0;
}
if (unmatch_period_count * ping_period > unmatch_timeout + ping_period) {
LOG(INFO) << "no incoming connections, regenerate key ...";
*matchkey = RandomKey(key_ + ":", old_keyset_);
std::ostringstream ss;
ss << "[" << static_cast<int>(TrackerCode::kPut) << ", \"" << key_ << "\", [" << port
<< ", |
\"" << *matchkey << "\"], " << custom_addr_ << "]";
tracker_sock_.SendBytes(ss.str());
std::string remote_status = tracker_sock_.RecvBytes();
ICHECK_EQ(std::stoi(remote_status), static_cast<int>(TrackerCode::kSuccess));
unmatch_period_count = 0;
}
continue;
}
}
break;
}
}
private:
/*!
* \brief Connect to a RPC address with retry.
This function only tolerates short server restarts (it retries for up to the given timeout).
* \param timeout Timeout during retry
* \param retry_period Number of seconds before we retry again.
* \return TCPSocket The socket information if connect is success.
*/
support::TCPSocket ConnectWithRetry(int timeout = 60, int retry_period = 5) {
auto tbegin = std::chrono::system_clock::now();
while (true) {
support::SockAddr addr(tracker_addr_);
support::TCPSocket sock;
sock.Create();
LOG(INFO) << "Tracker connecting to " << addr.AsString();
if (sock.Connect(addr)) {
return sock;
}
auto period = (std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - tbegin))
.count();
ICHECK(period < timeout) << "Failed to connect to server " << addr.AsString();
LOG(WARNING) << "Cannot connect to tracker " << addr.AsString() << " retry in "
<< retry_period << " seconds.";
std::this_thread::sleep_for(std::chrono::seconds(retry_period));
}
}
/*!
* \brief Random Generate a random number between 0 and 1.
* \return random float value.
*/
float Random() { return dis_(gen_); }
/*!
* \brief Generate a random key.
* \param prefix The string prefix.
* \param cmap The conflict map set.
* \return The generated random key.
*/
std::string RandomKey(const std::string& prefix, const std::set<std::string>& cmap) {
if (!cmap.empty()) {
while (true) {
std::string key = prefix + std::to_string(Random());
if (cmap.find(key) == cmap.end()) {
return key;
}
}
}
return prefix + std::to_string(Random());
}
std::string tracker_addr_;
std::string key_;
std::string custom_addr_;
int port_;
support::TCPSocket tracker_sock_;
std::set<std::string> old_keyset_;
std::mt19937 gen_;
std::uniform_real_distribution<float> dis_;
};
}  // namespace runtime
}  // namespace tvm
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file win32_process.h
* \brief Win32 process code to mimic a POSIX fork()
*/
#ifndef TVM_APPS_CPP_RPC_WIN32_PROCESS_H_
#define TVM_APPS_CPP_RPC_WIN32_PROCESS_H_
#include <chrono>
#include <string>
#include "../../src/support/socket.h"
namespace tvm {
namespace runtime {
/*!
* \brief SpawnRPCChild Spawns a child process with a given timeout to run
* \param fd The client socket to duplicate in the child
* \param timeout The time in seconds to wait for the child to complete before termination
*/
void SpawnRPCChild(SOCKET fd, std::chrono::seconds timeout);
/*!
* \brief ChildProcSocketHandler Ran from the child process and runs server to handle the client
* socket \param mmap_path The memory mapped file path that will contain the information to
* duplicate the client socket from the parent
*/
void ChildProcSocketHandler(const std::string& mmap_path);
} // namespace runtime
} // namespace tvm
#endif // TVM_APPS_CPP_RPC_WIN32_PROCESS_H_
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import os
def test_plugin_module():
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
mod = tvm.runtime.load_module(os.path.join(curr_path, "lib", "plugin_module.so"))
# NOTE: we need to make sure all managed resources returned
# from mod get destructed before mod get unloaded.
#
# Failure mode we want to prevent from:
# We retain an object X whose destructor is within mod.
# The program will segfault if X get destructed after mod,
# because the destructor function has already been unloaded.
#
# The easiest way to achieve this is to wrap the
# logics related to mod inside a function.
def run_module(mod):
# normal functions
assert mod["AddOne"](10) == 11
assert mod["SubOne"](10) == 9
# advanced usecase: return a module
mymod = mod["CreateMyModule"](10)
fadd = mymod["add"]
assert fadd(10) == 20
assert mymod["mul"](10) == 100
run_module(mod)
if __name__ == "__main__":
test_plugin_module()
"""Example extension package of TVM."""
from __future__ import absolute_import