legacy_redirects = [
["api/python/rpc.html", "../../reference/api/python/rpc.html"],
["api/python/runtime.html", "../../reference/api/python/runtime.html"],
["api/python/target.html", "../../reference/api/python/target.html"],
["api/python/te.html", "../../reference/api/python/te.html"],
["api/python/tir.html", "../../reference/api/python/tir.html"],
["api/python/topi.html", "../../reference/api/python/topi.html"],
["api/python/vta/index.html", "../../../reference/api/python/vta/index.html"],
["langref/hybrid_script.html", "../reference/langref/hybrid_script.html"],
["langref/index.html", "../reference/langref/index.html"],
["langref/relay_adt.html", "../reference/langref/relay_adt.html"],
["langref/relay_expr.html", "../reference/langref/relay_expr.html"],
["langref/relay_op.html", "../reference/langref/relay_op.html"],
["langref/relay_pattern.html", "../reference/langref/relay_pattern.html"],
["langref/relay_type.html", "../reference/langref/relay_type.html"],
["microtvm/index.html", "../topic/microtvm/index.html"],
["vta/dev/config.html", "../../topic/vta/dev/config.html"],
["vta/dev/hardware.html", "../../topic/vta/dev/hardware.html"],
["vta/dev/index.html", "../../topic/vta/dev/index.html"],
["vta/index.html", "../topic/vta/index.html"],
["vta/install.html", "../topic/vta/install.html"],
["tutorials/index.html", "../tutorial/index.html"],
["tutorials/frontend/from_caffe2.html", "../../how_to/compile_models/from_caffe2.html"],
["tutorials/frontend/from_coreml.html", "../../how_to/compile_models/from_coreml.html"],
["tutorials/frontend/from_darknet.html", "../../how_to/compile_models/from_darknet.html"],
["tutorials/frontend/from_keras.html", "../../how_to/compile_models/from_keras.html"],
["tutorials/frontend/from_mxnet.html", "../../how_to/compile_models/from_mxnet.html"],
["tutorials/frontend/from_onnx.html", "../../how_to/compile_models/from_onnx.html"],
["tutorials/frontend/from_paddle.html", "../../how_to/compile_ |
models/from_paddle.html"],
["tutorials/frontend/from_pytorch.html", "../../how_to/compile_models/from_pytorch.html"],
["tutorials/frontend/from_tensorflow.html", "../../how_to/compile_models/from_tensorflow.html"],
["tutorials/frontend/from_tflite.html", "../../how_to/compile_models/from_tflite.html"],
[
"tutorials/frontend/deploy_model_on_android.html",
"../../how_to/deploy_models/deploy_model_on_android.html",
],
[
"tutorials/frontend/deploy_model_on_rasp.html",
"../../how_to/deploy_models/deploy_model_on_rasp.html",
],
[
"tutorials/frontend/deploy_object_detection_pytorch.html",
"../../how_to/deploy_models/deploy_object_detection_pytorch.html",
],
[
"tutorials/frontend/deploy_prequantized.html",
"../../how_to/deploy_models/deploy_prequantized.html",
],
[
"tutorials/frontend/deploy_prequantized_tflite.html",
"../../how_to/deploy_models/deploy_prequantized_tflite.html",
],
[
"tutorials/frontend/deploy_quantized.html",
"../../how_to/deploy_models/deploy_quantized.html",
],
["tutorials/frontend/deploy_sparse.html", "../../how_to/deploy_models/deploy_sparse.html"],
[
"tutorials/frontend/deploy_ssd_gluoncv.html",
"../../how_to/deploy_models/deploy_ssd_gluoncv.html",
],
[
"tutorials/dev/bring_your_own_datatypes.html",
"../../how_to/extend_tvm/bring_your_own_datatypes.html",
],
[
"tutorials/dev/low_level_custom_pass.html",
"../../how_to/extend_tvm/low_level_custom_pass.html",
],
["tutorials/dev/use_pass_infra.html", "../../how_to/extend_tvm/use_pass_infra.html"],
["tutorials/dev/use_pass_instrument.html", "../../how_to/extend_tvm/use_pass_instrument.html"],
["tutorials/optimize/opt_conv_cuda.html", "../../how_to/optimize_operators/opt_conv_cuda.html"],
[
"tutorials/optimize/opt_conv_tensorcore.html",
"../../how_to/optimize_operators/opt_conv_tensorcore.html", |
],
["tutorials/optimize/opt_gemm.html", "../../how_to/optimize_operators/opt_gemm.html"],
[
"tutorials/auto_scheduler/tune_conv2d_layer_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_arm.html",
"../../how_to/tune_with_autoscheduler/tune_network_arm.html",
],
[
"tutorials/auto_scheduler/tune_network_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_network_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_mali.html",
"../../how_to/tune_with_autoscheduler/tune_network_mali.html",
],
[
"tutorials/auto_scheduler/tune_network_x86.html",
"../../how_to/tune_with_autoscheduler/tune_network_x86.html",
],
[
"tutorials/auto_scheduler/tune_sparse_x86.html",
"../../how_to/tune_with_autoscheduler/tune_sparse_x86.html",
],
[
"tutorials/autotvm/tune_conv2d_cuda.html",
"../../how_to/tune_with_autotvm/tune_conv2d_cuda.html",
],
["tutorials/autotvm/tune_relay_arm.html", "../../how_to/tune_with_autotvm/tune_relay_arm.html"],
[
"tutorials/autotvm/tune_relay_cuda.html",
"../../how_to/tune_with_autotvm/tune_relay_cuda.html",
],
[
"tutorials/autotvm/tune_relay_mobile_gpu.html",
"../../how_to/tune_with_autotvm/tune_relay_mobile_gpu.html",
],
["tutorials/autotvm/tune_relay_x86.html", "../../how_to/tune_with_autotvm/tune_relay_x86.html"],
["tutorials/micro/micro_autotune.html", "../../how_to/work_with_microtvm/micro_autotune.html"],
[
"tutorials/micro/micro_reference_vm.html",
"../../how_to/work_with_microtvm/micro_reference_vm.html",
],
["tutorials/micro/micro_tflite.html", "../../how_to/work_with_microtvm/micro_tflite.html"],
["tutorials/frontend/build_gcn.html", "../../how_to/work_with_relay/build_gcn.html"],
[
"tutorials/frontend/using_external_lib.html",
"../../how |
_to/work_with_relay/using_external_lib.html",
],
["tutorials/language/extern_op.html", "../../how_to/work_with_schedules/extern_op.html"],
["tutorials/language/intrin_math.html", "../../how_to/work_with_schedules/intrin_math.html"],
["tutorials/language/reduction.html", "../../how_to/work_with_schedules/reduction.html"],
["tutorials/language/scan.html", "../../how_to/work_with_schedules/scan.html"],
[
"tutorials/language/schedule_primitives.html",
"../../how_to/work_with_schedules/schedule_primitives.html",
],
["tutorials/language/tedd.html", "../../how_to/work_with_schedules/tedd.html"],
["tutorials/language/tensorize.html", "../../how_to/work_with_schedules/tensorize.html"],
["tutorials/language/tuple_inputs.html", "../../how_to/work_with_schedules/tuple_inputs.html"],
[
"tutorials/get_started/auto_scheduler_matmul_x86.html",
"../../tutorial/auto_scheduler_matmul_x86.html",
],
["tutorials/get_started/autotvm_matmul_x86.html", "../../tutorial/autotvm_matmul_x86.html"],
["tutorials/get_started/autotvm_relay_x86.html", "../../tutorial/autotvm_relay_x86.html"],
[
"tutorials/get_started/cross_compilation_and_rpc.html",
"../../tutorial/cross_compilation_and_rpc.html",
],
["tutorials/get_started/install.html", "../../tutorial/install.html"],
["tutorials/topi/intro_topi.html", "../../tutorial/intro_topi.html"],
["tutorials/get_started/introduction.html", "../../tutorial/introduction.html"],
["tutorials/get_started/relay_quick_start.html", "../../tutorial/relay_quick_start.html"],
[
"tutorials/get_started/tensor_expr_get_started.html",
"../../tutorial/tensor_expr_get_started.html",
],
[
"tutorials/get_started/tvmc_command_line_driver.html",
"../../tutorial/tvmc_command_line_driver.html",
],
[
"tutorials/get_started/tvmc_python.html",
"../../tutorial/tvmc_python.html",
],
]
redirect_template = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="1; url=$to" />
<script>
window.location.href = "$to"
</script>
</head>
</html>
"""
def build_legacy_redirect(tvm_path):
    def legacy_redirect(app, docname):
        if app.builder.name == "html":
            src = Template(redirect_template)
            for frm, to in legacy_redirects:
                frm = tvm_path.resolve() / "docs" / "_build" / "html" / frm
                redirect = src.substitute({"to": to})
                os.makedirs(os.path.dirname(frm), exist_ok=True)
                with open(frm, "w") as f:
                    f.write(redirect)

    return legacy_redirect
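# A hedged sketch of how this factory might be hooked into a Sphinx conf.py
# (the setup() entry point and the tvm_path value below are assumptions for
# illustration, not taken from this file):
import os
import pathlib
from string import Template  # build_legacy_redirect above relies on Template and os


def setup(app):
    # "build-finished" passes (app, exception); the inner function's second
    # parameter simply absorbs it. The path below is a placeholder repo root.
    app.connect("build-finished", build_legacy_redirect(pathlib.Path("/path/to/tvm")))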
import argparse
import pathlib

BASH = "# bash"
BASH_IGNORE = "# bash-ignore"
BASH_MULTILINE_COMMENT_START = ": '"
BASH_MULTILINE_COMMENT_END = "'"
def bash_to_python(src_path: pathlib.Path, dest_path: pathlib.Path):
    """Convert a bash script file to a Python format compatible with Sphinx doc."""
    with open(src_path, "r") as src_f:
        with open(dest_path, "w") as dest_f:
            line = src_f.readline()
            bash_block = []
            bash_detected = False
            bash_ignore_detected = False
            new_line_required = False
            while line:
                line = line.strip("\n").strip("\r")
                if bash_detected:
                    if line == BASH:
                        # Bash block closed: emit it as a commented Sphinx code-block.
                        if new_line_required:
                            dest_f.write("\n")
                        python_code = "# .. code-block:: bash\n#\n"
                        for bash_line in bash_block:
                            python_code += f"#\t  {bash_line}\n"
                        python_code += "#"
                        dest_f.write(python_code)
                        bash_detected = False
                        bash_block = []
                        new_line_required = True
                    else:
                        bash_block.append(line)
                elif bash_ignore_detected:
                    if line == BASH_IGNORE:
                        bash_ignore_detected = False
                        new_line_required = True
                    else:
                        new_line_required = False
                else:
                    if line == BASH:
                        bash_detected = True
                    elif line == BASH_IGNORE:
                        bash_ignore_detected = True
                    elif line in [BASH_MULTILINE_COMMENT_START, BASH_MULTILINE_COMMENT_END]:
                        # Multiline bash comments become Python docstring delimiters.
                        if new_line_required:
                            dest_f.write("\n")
                        dest_f.write('"""')
                        new_line_required = True
                    else:
                        if new_line_required:
                            dest_f.write("\n")
                        dest_f.write(f"{line}")
                        new_line_required = True
                line = src_f.readline()
            if new_line_required:
                dest_f.write("\n")
def main():
    parser = argparse.ArgumentParser(description="Convert tutorial script to Python.")
    parser.add_argument("script", type=str, help="Path to script file.")
    args = parser.parse_args()
    src_path = pathlib.Path(args.script)
    dest_path = src_path.parent / f"{src_path.stem}.py"
    bash_to_python(src_path, dest_path)


if __name__ == "__main__":
    main()
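# For concreteness, a hedged sketch of one conversion under the sentinel
# values defined above (file names and contents are hypothetical):
#
# Input tutorial.sh::
#
#     : '
#     Intro text that becomes a Sphinx docstring
#     '
#     # bash
#     pip install apache-tvm
#     # bash
#
# Output tutorial.py (the command line is indented by the "#\t  " prefix)::
#
#     """
#     Intro text that becomes a Sphinx docstring
#     """
#     # .. code-block:: bash
#     #
#     #     pip install apache-tvm
#     #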
"""
Compile CoreML Models
=====================
**Author**: `Joshua Z. Zhang <https:
`Kazutaka Morita <https:
`Zhao Wu <https:
This article is an introductory tutorial to deploy CoreML models with Relay.
For us to begin with, coremltools module is required to be installed.
A quick solution is to install via pip
.. code-block:: bash
pip install -U coremltools --user
or please refer to official site
https:
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import coremltools as cm
import numpy as np
from PIL import Image
model_url = "https:
model_file = "mobilenet.mlmodel"
model_path = download_testdata(model_url, model_file, module="coreml")
mlmodel = cm.models.MLModel(model_path)
img_url = "https:
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img_bgr = np.array(img)[:, :, ::-1]
x = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
target = "llvm"
shape_dict = {"image": x.shape}
mod, params = relay.frontend.from_coreml(mlmodel, shape_dict)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target, params=params)
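# Optionally, the compiled artifact can be persisted and reloaded later
# without recompiling; a minimal sketch (the file name is an assumption):
lib.export_library("mobilenet_coreml.so")
loaded_lib = tvm.runtime.load_module("mobilenet_coreml.so")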
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("image", tvm.nd.array(x.astype(dtype)))
m.run()
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
synset_url = "".join(
[
"https:
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synset = eval(f.read())
print("Top-1 id", top1, "class name", synset[top1])
"""
Compile YOLO-V2 and YOLO-V3 in DarkNet Models
=============================================
**Author**: `Siju Samuel <https:
This article is an introductory tutorial to deploy darknet models with TVM.
All the required models and libraries will be downloaded from the internet by the script.
This script runs the YOLO-V2 and YOLO-V3 Model with the bounding boxes
Darknet parsing have dependancy with CFFI and CV2 library
Please install CFFI and CV2 before executing this script
.. code-block:: bash
pip install cffi
pip install opencv-python
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import numpy as np
import matplotlib.pyplot as plt
import sys
import tvm
from tvm import te
from tvm import relay
from ctypes import *
from tvm.contrib.download import download_testdata
from tvm.relay.testing.darknet import __darknetffi__
import tvm.relay.testing.yolo_detection
import tvm.relay.testing.darknet
MODEL_NAME = "yolov3"
CFG_NAME = MODEL_NAME + ".cfg"
WEIGHTS_NAME = MODEL_NAME + ".weights"
REPO_URL = "https:
CFG_URL = REPO_URL + "cfg/" + CFG_NAME + "?raw=true"
WEIGHTS_URL = "https:
cfg_path = download_testdata(CFG_URL, CFG_NAME, module="darknet")
weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module="darknet")
if sys.platform in ["linux", "linux2"]:
DARKNET_LIB = "libdarknet2.0.so"
DARKNET_URL = REPO_URL + "lib/" + DARKNET_LIB + "?raw=true"
elif sys.platform == "darwin":
DARKNET_LIB = "libdarknet_mac2.0.so"
DARKNET_URL = REPO_URL + "lib_osx/" + DARKNET_LIB + "?raw=true"
else:
err = "Darknet lib is not supported on {} platform".format(sys.platform)
raise NotImplementedError(err)
lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module="darknet")
DARKNET_LIB = __darknetffi__.dlopen(lib_path)
net = DARKNET_LIB.load_network(cfg_path.encode("utf-8"), weights_path.encode("utf-8"), 0)
dtype = "float32"
batch_size = 1
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape_dict = {"data": data.shape}
print("Converting darknet to relay functions...")
mod, params = relay.frontend.from_darknet(net, dtype=dtype, shape=data.shape)
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape = {"data": data.shape}
print("Compiling the model...")
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target=target, params=params)
[neth, netw] = shape["data"][2:]
test_image = "dog.jpg"
print("Loading the test image...")
img_url = REPO_URL + "data/" + test_image + "?raw=true"
img_path = download_testdata(img_url, test_image, "data")
data = tvm.relay.testing.darknet.load_image(img_path, netw, neth)
from tvm.contrib import graph_executor
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("data", tvm.nd.array(data.astype(dtype)))
print("Running the test image...")
thresh = 0.5
nms_thresh = 0.45
m.run()
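# Before decoding the outputs, throughput can optionally be measured with the
# graph executor's built-in timer (the run counts here are arbitrary):
timer = m.module.time_evaluator("run", dev, number=1, repeat=10)
print("mean inference time: %.2f ms" % (timer().mean * 1000))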
tvm_out = []
if MODEL_NAME == "yolov2":
layer_out = {}
layer_out["type"] = "Region"
layer_attr = m.get_output(2).numpy()
layer_out["biases"] = m.get_output(1).numpy()
out_shape = (layer_attr[0], layer_attr[1]
layer_out["output"] = m.get_output(0).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
layer_out["coords"] = layer_attr[5]
layer_out["background"] = layer_attr[6]
tvm_out.append(layer_out)
elif MODEL_NAME == "yolov3":
for i in range(3):
layer_out = {}
layer_out["type"] = "Yolo"
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1]
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
elif MODEL_NAME == "yolov3-tiny":
for i in range(2):
layer_out = {}
layer_out["type"] = "Yolo"
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1]
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
thresh = 0.560
img = tvm.relay.testing.darknet.load_image_color(img_path)
_, im_h, im_w = img.shape
dets = tvm.relay.testing.yolo_detection.fill_network_boxes(
(netw, neth), (im_w, im_h), thresh, 1, tvm_out
)
last_layer = net.layers[net.n - 1]
tvm.relay.testing.yolo_detection.do_nms_sort(dets, last_layer.classes, nms_thresh)
coco_name = "coco.names"
coco_url = REPO_URL + "data/" + coco_name + "?raw=true"
font_name = "arial.ttf"
font_url = REPO_URL + "data/" + font_name + "?raw=true"
coco_path = download_testdata(coco_url, coco_name, module="data")
font_path = download_testdata(font_url, font_name, module="data")
with open(coco_path) as f:
    content = f.readlines()
names = [x.strip() for x in content]
tvm.relay.testing.yolo_detection.show_detections(img, dets, thresh, names, last_layer.classes)
tvm.relay.testing.yolo_detection.draw_detections(
font_path, img, dets, thresh, names, last_layer.classes
)
plt.imshow(img.transpose(1, 2, 0))
plt.show()
"""
Compile Keras Models
=====================
**Author**: `Yuwei Hu <https:
This article is an introductory tutorial to deploy keras models with Relay.
For us to begin with, keras should be installed.
Tensorflow is also required since it's used as the default backend of keras.
A quick solution is to install via pip
.. code-block:: bash
pip install -U keras --user
pip install -U tensorflow --user
or please refer to official site
https:
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import keras
import tensorflow as tf
import numpy as np
if tuple(keras.__version__.split(".")) < ("2", "4", "0"):
    weights_url = "".join(
        [
            "https://github.com/fchollet/deep-learning-models/releases/",
            "download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
        ]
    )
    weights_file = "resnet50_keras_old.h5"
else:
    weights_url = "".join(
        [
            "https://storage.googleapis.com/tensorflow/keras-applications/",
            "resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
        ]
    )
    weights_file = "resnet50_keras_new.h5"
weights_path = download_testdata(weights_url, weights_file, module="keras")
keras_resnet50 = tf.keras.applications.resnet50.ResNet50(
include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
keras_resnet50.load_weights(weights_path)
from PIL import Image
from matplotlib import pyplot as plt
from tensorflow.keras.applications.resnet50 import preprocess_input
img_url = "https:
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
plt.imshow(img)
plt.show()
data = np.array(img)[np.newaxis, :].astype("float32")
data = preprocess_input(data).transpose([0, 3, 1, 2])
print("input_1", data.shape)
shape_dict = {"input_1": data.shape}
mod, params = relay.frontend.from_keras(keras_resnet50, shape_dict)
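# The imported module can be inspected before compiling; printing it shows
# the Relay IR that from_keras produced:
print(mod)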
target = "cuda"
dev = tvm.cuda(0)
with tvm.transform.PassContext(opt_level=0):
    model = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate()
dtype = "float32"
tvm_out = model(tvm.nd.array(data.astype(dtype)))
top1_tvm = np.argmax(tvm_out.numpy()[0])
synset_url = "".join(
[
"https:
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synset = eval(f.read())
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, synset[top1_tvm]))
keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1]))
top1_keras = np.argmax(keras_out)
print("Keras top-1 id: {}, class name: {}".format(top1_keras, synset[top1_keras])) |
"""
.. _tutorial-from-mxnet:
Compile MXNet Models
====================
**Author**: `Joshua Z. Zhang <https:
`Kazutaka Morita <https:
This article is an introductory tutorial to deploy mxnet models with Relay.
For us to begin with, mxnet module is required to be installed.
A quick solution is
.. code-block:: bash
pip install mxnet --user
or please refer to official installation guide.
https:
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import mxnet as mx
import tvm
import tvm.relay as relay
import numpy as np
from tvm.contrib.download import download_testdata
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt
block = get_model("resnet18_v1", pretrained=True)
img_url = "https:
img_name = "cat.png"
synset_url = "".join(
[
"https:
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
img_path = download_testdata(img_url, "cat.png", module="data")
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
plt.imshow(image)
plt.show()
def transform_image(image):
    image = np.array(image) - np.array([123.0, 117.0, 104.0])
    image /= np.array([58.395, 57.12, 57.375])
    image = image.transpose((2, 0, 1))
    image = image[np.newaxis, :]
    return image
x = transform_image(image)
print("x", x.shape)
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
target = "cuda"
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(func, target, params=params)
from tvm.contrib import graph_executor
dev = tvm.cuda(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.run()
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
print("TVM prediction top-1:", top1, synset[top1])
def block2symbol(block):
    data = mx.sym.Variable("data")
    sym = block(data)
    args = {}
    auxs = {}
    for k, v in block.collect_params().items():
        args[k] = mx.nd.array(v.data().asnumpy())
    return sym, args, auxs
mx_sym, args, auxs = block2symbol(block)
mx.model.save_checkpoint("resnet18_v1", 0, mx_sym, args, auxs)
mx_sym, args, auxs = mx.model.load_checkpoint("resnet18_v1", 0)
mod, relay_params = relay.frontend.from_mxnet(mx_sym, shape_dict, arg_params=args, aux_params=auxs)
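# The checkpoint files written above can now be removed; a hedged cleanup
# relying on MXNet's standard checkpoint naming:
import os

os.remove("resnet18_v1-symbol.json")
os.remove("resnet18_v1-0000.params")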
"""
Compile OneFlow Models
======================
**Author**: `Xiaoyu Zhang <https:
This article is an introductory tutorial to deploy OneFlow models with Relay.
For us to begin with, OneFlow package should be installed.
A quick solution is to install via pip
.. code-block:: bash
pip install flowvision==0.1.0
python3 -m pip install -f https:
or please refer to official site:
https:
Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable.
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import os, math
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import flowvision
import oneflow as flow
import oneflow.nn as nn
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
model_name = "resnet18"
model = getattr(flowvision.models, model_name)(pretrained=True)
model = model.eval()
model_dir = "resnet18_model"
if not os.path.exists(model_dir):
    flow.save(model.state_dict(), model_dir)
from PIL import Image
img_url = "https:
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
from flowvision import transforms
my_preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img = my_preprocess(img)
img = np.expand_dims(img.numpy(), 0)
class Graph(flow.nn.Graph):
    def __init__(self, module):
        super().__init__()
        self.m = module

    def build(self, x):
        out = self.m(x)
        return out
graph = Graph(model)
_ = graph._compile(flow.randn(1, 3, 224, 224))
mod, params = relay.frontend.from_oneflow(graph, model_dir)
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target=target, params=params)
target = "cuda"
with tvm.transform.PassContext(opt_level=10):
    intrp = relay.build_module.create_executor("graph", mod, tvm.cuda(0), target)
print(type(img))
print(img.shape)
tvm_output = intrp.evaluate()(tvm.nd.array(img.astype("float32")), **params)
synset_url = "".join(
[
"https:
"pretrained-models.pytorch/master/data/",
"imagenet_synsets.txt",
]
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synsets = f.readlines()
synsets = [x.strip() for x in synsets]
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
class_url = "".join(
[
"https:
"pretrained-models.pytorch/master/data/",
"imagenet_classes.txt",
]
)
class_name = "imagenet_classes.txt"
class_path = download_testdata(class_url, class_name, module="data")
with open(class_path) as f:
    class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
top1_tvm = np.argmax(tvm_output.numpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]
with flow.no_grad():
    torch_img = flow.from_numpy(img)
    output = model(torch_img)
    top_oneflow = np.argmax(output.numpy())
    oneflow_class_key = class_id_to_key[top_oneflow]
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print(
    "OneFlow top-1 id: {}, class name: {}".format(top_oneflow, key_to_classname[oneflow_class_key])
)
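# A hedged one-line sanity check, assuming both frameworks agree on this image:
assert tvm_class_key == oneflow_class_key, "TVM and OneFlow disagree on top-1"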
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https:
This article is an introductory tutorial to deploy ONNX models with Relay.
For us to begin with, ONNX package must be installed.
A quick solution is to install protobuf compiler, and
.. code-block:: bash
pip install --user onnx onnxoptimizer
or please refer to official site.
https:
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import onnx
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
model_url = "".join(
[
"https:
"bcda4716699ac97ea44f791c24310193/raw/",
"93672b029103648953c4e5ad3ac3aadf346a4cdc/",
"super_resolution_0.2.onnx",
]
)
model_path = download_testdata(model_url, "super_resolution.onnx", module="onnx")
onnx_model = onnx.load(model_path)
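# Optionally, the downloaded model can be validated before import; onnx ships
# a structural checker for this:
onnx.checker.check_model(onnx_model)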
from PIL import Image
img_url = "https:
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr")
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
target = "llvm"
input_name = "1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with tvm.transform.PassContext(opt_level=1):
    executor = relay.build_module.create_executor(
        "graph", mod, tvm.cpu(0), target, params
    ).evaluate()
dtype = "float32"
tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy()
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode="L")
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge("YCbCr", [out_y, out_cb, out_cr]).convert("RGB")
canvas = np.full((672, 672 * 2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
"""
Compile PaddlePaddle Models
===========================
**Author**: `Ziyuan Ma <https:
This article is an introductory tutorial to deploy PaddlePaddle models with Relay.
For us to begin with, PaddlePaddle>=2.1.3 is required to be installed.
A quick solution is
.. code-block:: bash
pip install paddlepaddle -i https:
or please refer to official site.
https:
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import tarfile
import paddle
import numpy as np
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
url = "https:
model_path = download_testdata(url, "paddle_resnet50.tar", module="model")
with tarfile.open(model_path) as tar:
    names = tar.getnames()
    for name in names:
        tar.extract(name, "./")
model = paddle.jit.load("./paddle_resnet50/model")
from PIL import Image
import paddle.vision.transforms as T
transforms = T.Compose(
[
T.Resize((256, 256)),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img_url = "https:
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img = transforms(img)
img = np.expand_dims(img, axis=0)
target = "llvm"
shape_dict = {"inputs": img.shape}
mod, params = relay.frontend.from_paddle(model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
    executor = relay.build_module.create_executor(
        "graph", mod, tvm.cpu(0), target, params
    ).evaluate()
dtype = "float32"
tvm_output = executor(tvm.nd.array(img.astype(dtype))).numpy()
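# The result can also be cross-checked against PaddlePaddle itself; a hedged
# sketch (top1_paddle is local to this example):
paddle_out = model(paddle.to_tensor(img))
top1_paddle = np.argmax(paddle_out.numpy()[0])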
synset_url = "".join(
[
"https:
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synset = f.readlines()
top1 = np.argmax(tvm_output[0])
print(f"TVM prediction top-1 id: {top1}, class name: {synset[top1]}") |
"""
Compile PyTorch Models
======================
**Author**: `Alex Wong <https:
This article is an introductory tutorial to deploy PyTorch models with Relay.
For us to begin with, PyTorch should be installed.
TorchVision is also required since we will be using it as our model zoo.
A quick solution is to install via pip
.. code-block:: bash
pip install torch==1.7.0
pip install torchvision==0.8.1
or please refer to official site
https:
PyTorch versions should be backwards compatible but should be used
with the proper TorchVision version.
Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may
be unstable.
"""
from tvm import testing

testing.utils.install_request_hook(depth=3)

import tvm
from tvm import relay
import numpy as np
from tvm.contrib.download import download_testdata
import torch
import torchvision
model_name = "resnet18"
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()
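# Tracing specializes the graph to the example input's shapes; as a hedged
# alternative for models with data-dependent control flow, torch.jit.script
# may be used instead:
scripted_alt = torch.jit.script(model).eval()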
from PIL import Image
img_url = "https:
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
from torchvision import transforms