import os
import tvm
from tvm import te
from tvm.contrib import nvcc
import numpy as np
TASK = "lstm"
USE_MANUAL_CODE = False
PERSIST_KERNEL = True
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL
SKIP_CHECK = False
UNROLL_WLOAD = True
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
"""Use nvcc compiler for better perf."""
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def lstm():
if not PERSIST_KERNEL:
raise ValueError("Non persist LSTM not yet supported")
num_thread_y = 8
num_thread_x = 16 * 3
num_sm = 24
n_num_step = 128
num_step = te.var("num_step")
num_hidden = 1152
batch_size = 1
Xi2h = te.placeholder((num_step, batch_size, 4, num_hidden), name="Xi2h")
Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
s_state_h = te.placeholder((num_step, batch_size, num_hidden))
s_state_c = te.placeholder((num_step, batch_size, num_hidden))
s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
k = te.reduce_axis((0, num_hidden), name="ki2h")
s_h2h = te.compute(
(num_step, batch_size, 4, num_hidden),
lambda t, i, x, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
name="s_h2h",
)
gates = te.compute(Xi2h.shape, lambda *i: Xi2h(*i) + s_h2h(*i), name="gates")
gshape = (num_step, batch_size, num_hidden)
in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 0, j]), name="in_gate")
in_transform = te.compute(
gshape, lambda t, i, j: te.tanh(gates[t, i, 1, j]), name="in_transform"
)
forget_gate = te.compute(
gshape, lambda t, i, j: te.sigmoid(gates[t, i, 2, j]), name="forget_gate"
)
out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 3, j]), name="out_gate")
next_c = te.compute(
gshape,
lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
+ in_gate[t, i, j] * in_transform[t, i, j],
name="next_c",
)
next_h = te.compute(
gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
)
update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
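# The computes above implement the standard LSTM cell recurrences, with the
# four gate slices g0..g3 taken from gates = Xi2h + s_h2h:
#   i_t = sigmoid(g0), g_t = tanh(g1), f_t = sigmoid(g2), o_t = sigmoid(g3)
#   c_t = f_t * c_{t-1} + i_t * g_t
#   h_t = o_t * tanh(c_t)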
scan_h, scan_c = tvm.te.scan(
[s_init_h, s_init_c],
[update_h, update_c],
[s_state_h, s_state_c],
inputs=[Xi2h],
name="lstm_scan",
)
s = te.create_schedule(scan_h.op)
s[gates].compute_inline()
s[in_gate].compute_inline()
s[in_transform].compute_inline()
s[forget_gate].compute_inline()
s[out_gate].compute_inline()
block_x = te.thread_axis((0, num_sm), "blockIdx.x")
thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
s_state_h_S = s.cache_read(s_state_h, "shared", [s_h2h])
s_state_c_S = s.cache_read(s_state_c, "shared", [next_c])
Wh2hL = s.cache_read(Wh2h, "local", [s_h2h])
ko, ki = s[s_h2h].split(s[s_h2h].op.reduce_axis[0], nparts=num_thread_y)
s_h2h_rf = s.rfactor(s_h2h, ko)
s[s_h2h].bind(s[s_h2h].op.reduce_axis[0], thread_y)
s[s_h2h_rf].compute_at(s[s_h2h], s[s_h2h].op.reduce_axis[0])
if PERSIST_KERNEL:
s[scan_h.op].env_threads([block_x, thread_y, thread_x])
s[Wh2hL].compute_at(s[scan_h.op], thread_x)
else:
s[Wh2hL].compute_at(s[s_h2h], s[s_h2h].op.axis[3])
if UNROLL_WLOAD:
s[Wh2hL].unroll(Wh2hL.op.axis[0])
s[Wh2hL].unroll(Wh2hL.op.axis[2])
s[s_state_h_S].compute_at(s[s_h2h_rf], s[s_h2h_rf].op.axis[3])
s[s_state_c_S].compute_at(s[scan_h.op], s[scan_h].op.scan_axis)
for ss in [s_state_h_S]:
xo, xi = s[ss].split(ss.op.axis[2], factor=num_thread_x * num_thread_y)
ty, xi = s[ss].split(xi, nparts=num_thread_y)
tx, xi = s[ss].split(xi, nparts=num_thread_x)
s[ss].bind(ty, thread_y)
s[ss].bind(tx, thread_x)
for init in [s_init_c, s_init_h]:
bx, xi = s[init].split(init.op.axis[2], nparts=num_sm)
tx, xi = s[init].split(xi, nparts=num_thread_x)
s[init].bind(bx, block_x)
s[init].bind(tx, thread_x)
s[next_c].set_store_predicate(thread_y.equal(0))
s[next_h].set_store_predicate(thread_y.equal(0))
for update in [update_c, update_h]:
bx, xi = s[update].split(s[update].op.axis[2], nparts=num_sm)
tx, xi = s[update].split(xi, nparts=num_thread_x)
s[update].bind(bx, block_x)
s[update].bind(tx, thread_x)
s[update].set_store_predicate(thread_y.equal(0))
def check_device(target):
num_step = n_num_step
flstm = tvm.build(s, [Xi2h, Wh2h, scan_h, scan_c], target)
dev = tvm.cuda(0) if target == "cuda" else tvm.cl(0)
scan_h_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
scan_c_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
Xi2h_np = np.random.normal(size=(num_step, batch_size, 4, num_hidden)).astype("float32")
Wh2h_np = np.random.normal(size=(4, num_hidden, num_hidden)).astype("float32")
scan_h_a = tvm.nd.array(scan_h_np, dev)
scan_c_a = tvm.nd.array(scan_c_np, dev)
Xi2h_a = tvm.nd.array(Xi2h_np, dev)
Wh2h_a = tvm.nd.array(Wh2h_np, dev)
flstm(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
dev.sync()
evaluator = flstm.time_evaluator(flstm.entry_name, dev, 1, repeat=1000)
eval_result = evaluator(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
print("Time cost=%g" % eval_result.mean)
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 128,
},
"tir.detect_global_barrier": DETECT_GLOBAL_BARRIER,
}
):
check_device("cuda")
if __name__ == "__main__":
lstm()
"""Matrix exponential example.
This is an example for matrix exponential,
which calculates the following recursion formula
```math
X[t] = dot(X[t-1], W)
```
""" |
import tvm
from tvm import te
import time
import os
import argparse
from tvm.contrib import nvcc
import numpy as np
TASK = "matexp"
USE_MANUAL_CODE = False
PERSIST_KERNEL = True
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL
SKIP_CHECK = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
"""Use nvcc compiler for better perf."""
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def rnn_matexp():
n_num_step = 128
n_num_hidden = 1152
n_batch_size = 4
detect_global_barrier = DETECT_GLOBAL_BARRIER
num_step = te.var("num_step")
num_hidden = tvm.runtime.convert(n_num_hidden)
batch_size = tvm.runtime.convert(n_batch_size)
num_thread_y = 8
num_thread_x = 16 * 3
num_sm = 24
Whh = te.placeholder((num_hidden, num_hidden), name="Whh")
s_init = te.compute((1, batch_size, num_hidden), lambda _, i, j: 1.0, name="init")
s_state = te.placeholder((num_step, batch_size, num_hidden))
kh = te.reduce_axis((0, num_hidden), name="kh")
s_update = te.compute(
(num_step, batch_size, num_hidden),
lambda t, i, j: te.sum(s_state[t - 1, i, kh] * Whh[kh, j], axis=kh),
name="update",
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
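# te.scan realizes the recurrence over the time axis: state[0] is taken from
# s_init, and state[t] = update(state[t-1]) for t >= 1, with s_state serving
# as the placeholder for the previous state inside s_update.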
s = te.create_schedule(s_scan.op)
CL = s_update
SS = s.cache_read(s_state, "shared", [CL])
SL = s.cache_read(SS, "local", [CL])
WhhL = s.cache_read(Whh, "local", [CL])
ko, ki = s[CL].split(s[CL].op.reduce_axis[0], nparts=num_thread_y)
CLF = s.rfactor(CL, ko)
block_x = te.thread_axis((0, num_sm), "blockIdx.x")
thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
if PERSIST_KERNEL:
s[s_scan.op].env_threads([block_x, thread_y, thread_x])
bx, xi = s[s_init].split(s_init.op.axis[2], nparts=num_sm)
tx, xi = s[s_init].split(xi, nparts=num_thread_x)
s[s_init].bind(bx, block_x)
s[s_init].bind(tx, thread_x)
bx, xi = s[s_update].split(s[CL].op.axis[2], nparts=num_sm)
tx, xi = s[s_update].split(xi, nparts=num_thread_x)
s[s_update].bind(bx, block_x)
s[s_update].bind(tx, thread_x)
s[CL].bind(s[CL].op.reduce_axis[0], thread_y)
s[CLF].compute_at(s[CL], s[CL].op.reduce_axis[0])
s[CL].set_store_predicate(thread_y.equal(0))
if PERSIST_KERNEL:
s[WhhL].compute_at(s[s_scan], thread_x)
s[WhhL].unroll(WhhL.op.axis[0])
else:
s[WhhL].compute_at(s[CLF], CLF.op.axis[3])
kr, ki = s[CLF].split(CLF.op.reduce_axis[0], nparts=1)
ko, ki = s[CLF].split(ki, factor=4)
s[SS].compute_at(s[CLF], kr)
s[SL].compute_at(s[CLF], ko)
xo, xi = s[SS].split(SS.op.axis[2], factor=num_thread_x * num_thread_y * 3)
ty, xi = s[SS].split(xi, nparts=num_thread_y)
tx, xi = s[SS].split(xi, nparts=num_thread_x)
s[SS].bind(ty, thread_y)
s[SS].bind(tx, thread_x)
def check_device(target):
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 128,
},
"tir.detect_global_barrier": detect_global_barrier,
}
):
f = tvm.build(s, [s_scan, Whh], target)
dev = tvm.cuda(0) if target == "cuda" else tvm.cl(0)
res_np = np.zeros((n_num_step, n_batch_size, n_num_hidden)).astype("float32")
Whh_np = np.zeros((n_num_hidden, n_num_hidden)).astype("float32")
Whh_np[:] = 2.0 / n_num_hidden
Whh_np[:, n_num_hidden // 2 :] = 0
res_a = tvm.nd.array(res_np, dev)
Whh_a = tvm.nd.array(Whh_np, dev)
f(res_a, Whh_a)
dev.sync()
tstart = time.time()
f(res_a, Whh_a)
dev.sync()
tgap = time.time() - tstart
print("Time cost=%g" % tgap)
if not SKIP_CHECK:
res_cuda = res_a.numpy()
res_cmp = np.ones_like(res_np).astype("float64")
Whh_np = Whh_np.astype("float64")
for t in range(1, n_num_step):
res_cmp[t][:] = np.dot(res_cmp[t - 1], Whh_np)
for i in range(n_num_step):
for j in range(n_num_hidden):
if abs(res_cmp[i, 0, j] - res_cuda[i, 0, j]) > 1e-5:
print("%d, %d: %g vs %g" % (i, j, res_cmp[i, 0, j], res_cuda[i, 0, j]))
tvm.testing.assert_allclose(res_cuda, res_cmp, rtol=1e-3)
check_device("cuda")
if __name__ == "__main__":
rnn_matexp()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Template files for UMA tutorial
"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA backend for the my_ai_hw accelerator"""
from passes import MyAiHwConv2dPass
from tvm.relay.backend.contrib.uma.api.utils import PassPhase
from tvm.relay.backend.contrib.uma.backend import UMABackend
from codegen import gen_includes
from patterns import conv2d_pattern
class MyAiHwBackend(UMABackend):
"""UMA backend for the MyAiHw accelerator."""
def __init__(self):
super().__init__()
# Target configuration
self._register_target_attr("dimension")
# Relay Pattern registration
self._register_pattern("conv2d", conv2d_pattern())
# Relay to TIR function registration
self._register_tir_pass(PassPhase.TIR_PHASE_0, MyAiHwConv2dPass())
# TIR to runtime function registration
self._register_codegen(fmt="c", includes=gen_includes)
@property
def target_name(self):
return "my_ai_hw"
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA codegen for the my_ai_hw accelerator"""
import tvm
import pathlib
def gen_includes() -> str:
topdir = pathlib.Path(__file__).parent.absolute()
includes = ""
includes += f'#include "{topdir}/conv2dnchw.cc"'
return includes
"""Transform passes for the my_ai_hw accelerator""" |
import tvm
from tvm |
import tir
from tvm.relay.backend.contrib.uma.api.utils |
import add_llvm_to_block
@tvm.tir.transform.prim_func_pass(opt_level=2)
class MyAiHwConv2dPass:
_EXTERNAL_FUNCTION_NAME = "my_ai_hw_conv2dnchw"
_TVM_BLOCK_MATCH_NAME = "conv2d_nchw"
def transform_function(
self, func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
) -> tvm.tir.PrimFunc:
return self._my_ai_hw_conv2d_pass(func, mod, ctx)
@classmethod
def _my_ai_hw_conv2d_pass(cls, func, mod, ctx):
_loops = dict()
_handles = []
_entry_node = None
def _has_block(name: str, func: tvm.tir.PrimFunc) -> bool:
"""
Determine whether a tir.block with `name` exists in `func`
"""
def _hb(op):
if isinstance(op, tvm.tir.Block):
_found_blocks.append(op.name_hint)
_found_blocks = []
tvm.tir.stmt_functor.post_order_visit(func.body, _hb)
return name in _found_blocks
def _detect_and_replace_conv2d(
func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
) -> tvm.tir.PrimFunc:
def _replace_conv2d(op):
if op == _entry_node:
irb = tvm.tir.ir_builder.create()
buffers = [b[1].data for b in _handles]
for k, v in _loops.items():
assert v.min.value == 0
offset_order = ["co", "w", "h", "ci", "kh", "kw"]
offsets = [_loops[i].extent.value for i in offset_order]
args = buffers + offsets
irb.emit(tir_call(irb, True, cls._EXTERNAL_FUNCTION_NAME, *args))
irb_result = irb.get()
return irb_result
elif isinstance(op, tvm.tir.SeqStmt):
return op.seq[1]
return op
sch = tir.Schedule(func)
if _has_block(cls._TVM_BLOCK_MATCH_NAME, func):
conv2d_block = sch.get_block(cls._TVM_BLOCK_MATCH_NAME)
rv_loops = sch.get_loops(conv2d_block)
assert len(rv_loops) == 7
loops = dict(
n=rv_loops[0],
co=rv_loops[1],
h=rv_loops[2],
w=rv_loops[3],
ci=rv_loops[4],
kh=rv_loops[5],
kw=rv_loops[6],
)
_entry_node = sch.get(rv_loops[1])
_loops = {k: sch.get(v) for k, v in loops.items()}
_handles = func.buffer_map.items()
x = tvm.tir.stmt_functor.ir_transform(
func.body, None, _replace_conv2d, ["tir.For", "tir.SeqStmt"]
)
return func.with_body(x)
else:
return func
r = _detect_and_replace_conv2d(func, mod, ctx)
return r
def tir_call(ib: tvm.tir.ir_builder, extern: bool, name: str, *args):
"""
ib: ir_builder
extern: bool
True --> tvm.tir.call_extern
False --> tvm.tir.call_packed
name: str
function name
*args:
arguments for function call
"""
def buf_from_array(ib, arr, dtype):
var = ib.allocate("int32", (len(arr),), scope="global")
for i, v in enumerate(arr):
var[i] = v
buf = tvm.tir.decl_buffer((len(arr),), dtype, data=var, scope="global")
return buf
if extern:
args = [i.data if isinstance(i, tvm.tir.Buffer) else i for i in args]
return tvm.tir.call_extern("int32", name, *args)
else:
args = [
buf_from_array(ib, i, "int32")
if isinstance(i, (tuple, list, tvm.ir.container.Array))
else i
for i in args
]
return tvm.tir.call_packed(name, *args)
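# Hypothetical usage sketch (buffer names assumed for illustration): the pass
# above emits the extern call with buffer pointers followed by the loop
# extents in offset_order:
#   irb.emit(tir_call(irb, True, "my_ai_hw_conv2dnchw",
#                     ifmap.data, weights.data, result.data,
#                     co, w, h, ci, kh, kw))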
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay graph patterns for the my_ai_hw accelerator"""
from tvm.relay.dataflow_pattern import is_op, wildcard
def conv2d_pattern():
pattern = is_op("nn.conv2d")(wildcard(), wildcard())
pattern = pattern.has_attr({"strides": [1, 1], "groups": 1})
return pattern
def dense_pattern():
pattern = is_op("nn.dense")(wildcard(), wildcard())
return pattern
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
import tvm
from tvm import relay
from backend import MyAiHwBackend
from tvm.relay import transform
from collections import OrderedDict
import numpy as np
from tvm.testing.aot import (
AOTTestModel as AOTModel,
AOTTestRunner as AOTRunner,
generate_ref_data,
compile_and_run,
)
def create_conv2d(groups=1, runner=AOT_DEFAULT_RUNNER, weight_shape=32):
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, weight_shape, 3, 3)
pass_config = {"tir.usmp.enable": True}
runner = AOTRunner(
makefile=runner.makefile,
prologue=runner.prologue,
epilogue=runner.epilogue,
includes=runner.includes,
parameters=runner.parameters,
pass_config=pass_config,
)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
return mod, inputs, output_list, runner
def main():
mod, inputs, output_list, runner = create_conv2d()
uma_backend = MyAiHwBackend()
uma_backend.register()
mod = uma_backend.partition(mod)
target = tvm.target.Target("my_ai_hw", host=tvm.target.Target("c"))
export_directory = tvm.contrib.utils.tempdir(keep_for_debug=True).path
print(f"Generated files are in {export_directory}")
compile_and_run(
AOTModel(module=mod, inputs=inputs, outputs=output_list),
runner,
interface_api="c",
use_unpacked_api=True,
target=target,
test_dir=str(export_directory),
)
if __name__ == "__main__":
main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Strategies for the my_ai_hw accelerator"""
# Example how to integrate a custom conv1d strategy:
# @relay.op.strategy.override_native_generic_func("custom_conv1d_strategy")
# def custom_conv1d_strategy(attrs, inputs, out_type, target):
# strategy = _op.OpStrategy()
# strategy.add_implementation(
# wrap_compute_conv1d(custom_conv1d_compute),
# wrap_topi_schedule(custom_conv1d_schedule),
# name="custom_conv1d.generic",
# return strategy
#
# For further details see:
# - github.com/apache/tvm-rfcs/blob/main/rfcs/0060_UMA_Unified_Modular_Accelerator_Interface.md
# - $TVM_HOME/python/tvm/relay/op/strategy/x86.py
"""
UMA Command Line Interface (CLI)
Tool to create code skeletons for an easy integration of
new AI hardware accelerators/libraries into TVM using UMA
""" |
import argparse
import os
import shutil
import sys
import pathlib
from inflection import camelize, underscore
def _parse_args():
parser = argparse.ArgumentParser(description="UMA Interface command line interface")
parser.add_argument(
"--add_hardware",
type=str,
required=True,
)
parser.add_argument(
"--tutorial",
type=str,
)
args = parser.parse_args()
return args
def replace_template_name(
files: list, template_name: str, add_hw_name: str, template_source: str = "_template"
) -> None:
"""
Replace the template names in the skeleton code with the new hardware name
"""
for f in files:
with open(f) as read_file:
data = read_file.read()
for case in [underscore, camelize]:
data = data.replace(case(template_name), case(add_hw_name))
data = data.replace(template_source, underscore(add_hw_name))
with open(f, "w") as write_file:
write_file.write(data)
def main():
"""
UMA Command Line Interface (CLI)
"""
args = _parse_args()
add_hw_name = args.add_hardware
uma_template_path = pathlib.Path(os.getcwd(), "_template").absolute()
add_hw_path = os.path.join(uma_template_path.parent, add_hw_name)
if os.path.exists(add_hw_path):
print(
f"Hardware with name {add_hw_name} already exists in UMA file structure: {add_hw_path}"
)
sys.exit(-1)
else:
os.mkdir(add_hw_path)
uma_files = ["backend.py", "codegen.py", "passes.py", "patterns.py", "run.py", "strategies.py"]
if args.tutorial == "vanilla":
uma_files.append("conv2dnchw.cc")
source_files = [os.path.join(uma_template_path, f) for f in uma_files]
destination_files = [os.path.join(add_hw_path, f) for f in uma_files]
for src, dst in zip(source_files, destination_files):
shutil.copyfile(src, dst)
template_name = "my_ai_hw"
replace_template_name(destination_files, template_name, add_hw_name)
print(f"Success: added {add_hw_name} to {add_hw_path}")
if __name__ == "__main__":
main()
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
fn main() {
let out_dir = concat!(env!("CARGO_MANIFEST_DIR"), "/lib");
println!("cargo:rustc-link-search=native={}", out_dir);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
mod types;
mod utils;
use std::{collections::HashMap, convert::TryFrom, env, sync::Mutex};
use tvm_graph_rt::{Graph, GraphExecutor, SystemLibModule, Tensor as TVMTensor};
use types::Tensor;
extern "C" {
fn __wasm_call_ctors();
}
lazy_static! {
static ref SYSLIB: SystemLibModule = SystemLibModule::default();
static ref GRAPH_EXECUTOR: Mutex<GraphExecutor<'static, 'static>> = {
unsafe {
__wasm_call_ctors();
}
let graph = Graph::try_from(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/lib/graph.json"
)))
.unwrap();
let params_bytes =
include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/lib/graph.params"));
let params = tvm_graph_rt::load_param_dict(params_bytes)
.unwrap()
.into_iter()
.map(|(k, v)| (k, v.to_owned()))
.collect::<HashMap<String, TVMTensor<'static>>>();
let mut exec = GraphExecutor::new(graph, &*SYSLIB).unwrap();
exec.load_params(params);
Mutex::new(exec)
};
}
pub extern "C" fn run(wasm_addr: i32, in_size: i32) -> i32 {
let in_tensor = unsafe { utils::load_i |
nput(wasm_addr, in_size as usize) };
let input: TVMTensor = in_tensor.as_dltensor().into();
let mut executor = GRAPH_EXECUTOR.lock().unwrap();
executor.set_input("data", input);
executor.run();
let output = executor.get_output(0).unwrap().as_dltensor(false);
let out_tensor: Tensor = output.into();
let out_size = unsafe { utils::store_output(wasm_addr, out_tensor) };
out_size as i32
}
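// Calling convention of `run` (as implemented above): the host writes the
// JSON-serialized input Tensor at `wasm_addr`, the module deserializes it,
// executes the graph, stores the JSON-serialized output back at the same
// address, and returns the output's byte length.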
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{
any::TypeId,
os::raw::{c_int, c_void},
slice,
};
pub use tvm_sys::ffi::DLTensor;
use tvm_sys::ffi::{
DLDataType, DLDataTypeCode_kDLFloat, DLDataTypeCode_kDLInt, DLDevice, DLDeviceType_kDLCPU,
};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum DataType {
FP32,
INT32,
INT8,
}
impl DataType {
pub fn as_dldtype(&self) -> DLDataType {
match self {
DataType::INT32 => DLDataType {
code: DLDataTypeCode_kDLInt as u8,
bits: 32u8,
lanes: 1u16,
},
DataType::INT8 => DLDataType {
code: DLDataTypeCode_kDLInt as u8,
bits: 8u8,
lanes: 1u16,
},
DataType::FP32 => DLDataType {
code: DLDataTypeCode_kDLFloat as u8,
bits: 32u8,
lanes: 1u16,
},
}
}
pub fn is_type<T: 'static>(&self) -> bool {
let typ = TypeId::of::<T>();
typ == TypeId::of::<i32>() || typ == TypeId::of::<i8>() || typ == TypeId::of::<f32>()
}
}
impl From<DLDataType> for DataType {
fn from(dl_dtype: DLDataType) -> Self {
if dl_dtype.code == DLDataTypeCode_kDLInt as u8 && dl_dtype.bits == 32u8 {
DataType::INT32
} else if dl_dtype.code == DLDataTypeCode_kDLInt as u8 && dl_dtype.bits == 8u8 {
DataType::INT8
} else if dl_dtype.code == DLDataTypeCode_kDLFloat as u8 && dl_dtype.bits == 32u8 {
DataType::FP32
} else {
DataType::FP32
}
}
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Tensor {
pub(crate) dtype: DataType,
pub(crate) shape: Vec<i64>,
pub(crate) strides: Option<Vec<usize>>,
pub(crate) data: Vec<u8>,
}
impl Tensor {
pub fn new(dtype: DataType, shape: Vec<i64>, strides: Vec<usize>, data: Vec<u8>) -> Self {
Tensor {
dtype,
shape,
strides: Some(strides),
data,
}
}
pub fn dtype(&self) -> DataType {
self.dtype.clone()
}
pub fn ndim(&self) -> usize {
self.shape.len()
}
pub fn shape(&self) -> Vec<i64> {
self.shape.clone()
}
pub fn data(&self) -> Vec<u8> {
self.data.clone()
}
pub fn as_dltensor(&self) -> DLTensor {
DLTensor {
data: self.data.as_ptr() as *mut c_void,
device: DLDevice {
device_type: DLDeviceType_kDLCPU,
device_id: 0 as c_int,
},
ndim: self.shape.len() as c_int,
dtype: self.dtype().as_dldtype(),
shape: self.shape.as_ptr() as *mut i64,
strides: self.strides.as_ref().unwrap().as_ptr() as *mut i64,
byte_offset: 0,
..Default::default()
}
}
pub fn to_vec<T: 'static + std::fmt::Debug + Clone>(&self) -> Vec<T> {
assert!(self.dtype().is_type::<T>());
unsafe {
slice::from_raw_parts(
self.data().as_ptr() as *const T,
self.shape().iter().map(|v| *v as usize).product::<usize>() as usize,
)
.to_vec()
}
}
}
impl Default for Tensor {
fn default() -> Self {
Self {
dtype: DataType::FP32,
shape: Vec::new(),
strides: None,
data: Vec::new(),
}
}
}
impl From<DLTensor> for Tensor {
fn from(dlt: DLTensor) -> Self {
unsafe {
let shape = slice::from_raw_parts_mut(dlt.shape, dlt.ndim as usize).to_vec();
let size = shape.iter().map(|v| *v as usize).product::<usize>() as usize;
let itemsize: usize = (dlt.dtype.bits >> 3).into();
let data = slice::from_raw_parts(dlt.data as *const u8, size * itemsize).to_vec();
Self {
dtype: DataType::from(dlt.dtype),
shape,
strides: if dlt.strides.is_null() {
None
} else {
Some(
slice::from_raw_parts_mut(dlt.strides as *mut usize, dlt.ndim as usize)
.to_vec(),
)
},
data,
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use super::types::*;
use serde_json;
use std::ptr;
pub unsafe fn load_input(in_addr: i32, in_size: usize) -> Tensor {
let in_addr = in_addr as *mut u8;
println!("DEBUG: in_addr {:?}, in_size {:?}", in_addr, in_size);
let data_vec = unsafe { std::slice::from_raw_parts(in_addr, in_size) };
let input = serde_json::from_slice(&data_vec);
match input {
Ok(result) => {
println!("DEBUG: SER SUCCEED!!! and Ok");
result
}
Err(e) => {
panic!("DEBUG: SER SUCCEED!!! but Err, {:?}", &e);
}
}
}
pub unsafe fn store_output(out_addr: i32, output: Tensor) -> usize {
let out_addr = out_addr as *mut u8;
let data_vec = serde_json::to_vec(&output).unwrap();
let data_size = data_vec.len();
for i in 0..data_size {
ptr::write(out_addr.offset(i as isize), *data_vec.get(i).unwrap());
}
data_size
}
"""Builds a simple resnet50 graph for testing.""" |
import argparse |
import os |
import subprocess |
import sys |
import onnx |
import tvm
from tvm |
import relay, runtime
from tvm.contrib.download |
import download_testdata
from tvm.contrib |
import graph_executor
from PIL |
import Image |
import numpy as np |
import tvm.relay as relay
model_url = (
"https:
"vision/classification/resnet/model/"
"resnet50-v2-7.onnx"
)
def build_graph_lib(opt_level):
"""Compiles the pre-trained model with TVM"""
out_dir = os.path.join(sys.path[0], "../lib")
if not os.path.exists(out_dir):
os.makedirs(out_dir)
model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx")
onnx_model = onnx.load(model_path)
img_url = "https:
img_path = download_testdata(img_url, "imagenet_cat.png", module="data")
resized_image = Image.open(img_path).resize((224, 224))
img_data = np.asarray(resized_image).astype("float32")
img_data = np.transpose(img_data, (2, 0, 1))
imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev
img_data = np.expand_dims(norm_img_data, axis=0)
input_name = "data"
shape_dict = {input_name: img_data.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
target = "llvm -mtriple=wasm32-unknown-unknown -mattr=+simd128"
with tvm.transform.PassContext(opt_level=opt_level):
factory = relay.build(
mod,
target=target,
params=params,
runtime=tvm.relay.backend.Runtime("cpp", {"system-lib": True}),
)
obj_file = os.path.join(out_dir, "graph.o")
factory.get_lib().save(obj_file)
lib_file = os.path.join(out_dir, "libgraph_wasm32.a")
cmds = [os.environ.get("LLVM_AR", "llvm-ar-10"), "rcs", lib_file, obj_file]
subprocess.run(cmds)
with open(os.path.join(out_dir, "graph.json"), "w") as f_graph:
f_graph.write(factory.get_graph_json())
with open(os.path.join(out_dir, "graph.params"), "wb") as f_params:
f_params.write(runtime.save_param_dict(factory.get_params()))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ONNX model build example")
parser.add_argument(
"-O",
"--opt-level",
type=int,
default=0,
help="level of optimization. 0 is non-optimized and 3 is the highest level",
)
args = parser.parse_args()
build_graph_lib(args.opt_level)
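# Example invocation (assumed; LLVM_AR falls back to llvm-ar-10 as above):
#   LLVM_AR=llvm-ar python3 build_graph_lib.py -O 3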
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use anyhow::Result;
use wasmtime::*;
use wasmtime_wasi::{WasiCtx, WasiCtxBuilder};
use super::Tensor;
pub struct GraphExecutor {
pub(crate) wasm_addr: i32,
pub(crate) input_size: i32,
pub(crate) output_size: i32,
pub(crate) store: Option<Store<WasiCtx>>,
pub(crate) instance: Option<Instance>,
}
impl GraphExecutor {
pub fn new() -> Self {
Self {
wasm_addr: 0,
input_size: 0,
output_size: 0,
store: None,
instance: None,
}
}
pub fn instantiate(&mut self, wasm_graph_file: String) -> Result<()> {
let engine = Engine::new(Config::new().wasm_simd(true)).unwrap();
let mut linker = Linker::new(&engine);
wasmtime_wasi::add_to_linker(&mut linker, |s| s)?;
let wasi = WasiCtxBuilder::new()
.inherit_stdio()
.inherit_args()?
.build();
let mut store = Store::new(&engine, wasi);
let module = Module::from_file(&engine, &wasm_graph_file)?;
self.instance = Some(linker.instantiate(&mut store, &module)?);
self.store = Some(store);
Ok(())
}
pub fn set_input(&mut self, input_data: Tensor) -> Result<()> {
let memory = self
.instance
.as_ref()
.unwrap()
.get_memory(self.store.as_mut().unwrap(), "memory")
.ok_or_else(|| anyhow::format_err!("failed to find `memory` export"))?;
let wasm_addr = memory.data_size(self.store.as_mut().unwrap());
let in_data = serde_json::to_vec(&input_data)?;
let in_size = in_data.len();
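// A wasm linear-memory page is 64 KiB, so `in_size >> 16` converts the
// input's byte length into whole pages; the extra page covers any remainder.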
memory.grow(self.store.as_mut().unwrap(), (in_size >> 16) as u32 + 1)?;
memory.write(self.store.as_mut().unwrap(), wasm_addr, &in_data)?;
self.wasm_addr = wasm_addr as i32;
self.input_size = in_size as i32;
Ok(())
}
pub fn run(&mut self) -> Result<()> {
let run = self
.instance
.as_ref()
.unwrap()
.get_func(self.store.as_mut().unwrap(), "run")
.ok_or_else(|| anyhow::format_err!("failed to find `run` function export!"))?;
let params = [Val::I32(self.wasm_addr), Val::I32(self.input_size)];
let out_size = run.call(self.store.as_mut().unwrap(), &params[..])?;
let out_size = (*out_size)[0].unwrap_i32();
if out_size == 0 {
panic!("graph run failed!");
}
self.output_size = out_size;
Ok(())
}
pub fn get_output(&mut self) -> Result<Tensor> {
let memory = self
.instance
.as_ref()
.unwrap()
.get_memory(self.store.as_mut().unwrap(), "memory")
.ok_or_else(|| anyhow::format_err!("failed to find `memory` export"))?;
let mut out_data = vec![0 as u8; self.output_size as _];
memory.read(
self.store.as_mut().unwrap(),
self.wasm_addr as _,
&mut out_data,
)?;
let out_vec: Tensor = serde_json::from_slice(&out_data).unwrap();
Ok(out_vec)
}
}
impl Default for GraphExecutor {
fn default() -> Self {
Self::new()
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#[macro_use]
extern crate serde_derive;
mod graph;
mod types;
pub use graph::GraphExecutor;
pub use types::Tensor;
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{any::TypeId, mem, slice};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum DataType {
FP32,
INT32,
INT8,
}
impl DataType {
pub fn is_type<T: 'static>(&self) -> bool {
let typ = TypeId::of::<T>();
typ == TypeId::of::<i32>() || typ == TypeId::of::<i8>() || typ == TypeId::of::<f32>()
}
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Tensor {
pub(crate) dtype: DataType,
pub(crate) shape: Vec<i64>,
pub(crate) strides: Option<Vec<usize>>,
pub(crate) data: Vec<u8>,
}
impl Tensor {
pub fn new(dtype: DataType, shape: Vec<i64>, strides: Vec<usize>, data: Vec<u8>) -> Self {
Tensor {
dtype,
shape,
strides: Some(strides),
data,
}
}
pub fn dtype(&self) -> DataType {
self.dtype.clone()
}
pub fn ndim(&self) -> usize {
self.shape.len()
}
pub fn shape(&self) -> Vec<i64> {
self.shape.clone()
}
pub fn data(&self) -> Vec<u8> {
self.data.clone()
}
pub fn to_vec<T: 'static + std::fmt::Debug + Clone>(&self) -> Vec<T> {
assert!(self.dtype().is_type::<T>());
unsafe {
slice::from_raw_parts(
self.data().as_ptr() as *const T,
self.shape().iter().map(|v| *v as usize).product::<usize>() as usize,
)
.to_vec()
}
}
}
impl Default for Tensor {
fn default() -> Self {
Self {
dtype: DataType::FP32,
shape: Vec::new(),
strides: None,
data: Vec::new(),
}
}
}
macro_rules! impl_tensor_from_ndarray {
($type:ty, $typecode:expr) => {
impl<D: ndarray::Dimension> From<ndarray::Array<$type, D>> for Tensor {
fn from(arr: ndarray::Array<$type, D>) -> Self {
Tensor {
dtype: $typecode,
shape: arr.shape().iter().map(|v| *v as i64).collect(),
strides: Some(arr.strides().iter().map(|v| *v as usize).collect()),
data: unsafe {
slice::from_raw_parts(
arr.as_ptr() as *const u8,
arr.len() * mem::size_of::<$type>(),
)
.to_vec()
},
}
}
}
};
}
impl_tensor_from_ndarray!(f32, DataType::FP32);
impl_tensor_from_ndarray!(i32, DataType::INT32);
impl_tensor_from_ndarray!(i8, DataType::INT8);
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use getopts::Options;
use image::{FilterType, GenericImageView};
use ndarray::Array;
use std::{collections::HashMap, env, fs::File, io::BufReader};
use wasm_runtime::{GraphExecutor, Tensor};
const IMG_HEIGHT: usize = 224;
const IMG_WIDTH: usize = 224;
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [options]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
opts.optopt(
"g",
"wasm-graph-file",
"set the path to wasm graph file",
"FILE_PATH",
);
opts.optopt(
"i",
"input-data-file",
"set the path to input image file",
"FILE_PATH",
);
opts.optopt(
"l",
"label-class-file",
"set the path to label class file",
"FILE_PATH",
);
opts.optflag("h", "help", "print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let wasm_graph_file: String = match matches.opt_str("g") {
Some(s) => s,
None => String::from(""),
};
let input_data_file: String = match matches.opt_str("i") {
Some(s) => s,
None => String::from(""),
};
let label_class_file: String = match matches.opt_str("l") {
Some(s) => s,
None => String::from(""),
};
let img = image::open(input_data_file).unwrap();
let input = data_preprocess(img);
let mut graph_exec = GraphExecutor::new();
graph_exec.instantiate(wasm_graph_file).unwrap();
graph_exec.set_input(input).unwrap();
graph_exec.run().unwrap();
let output: Tensor = match graph_exec.get_output() {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
output_assert(output, label_class_file);
}
fn data_preprocess(img: image::DynamicImage) -> Tensor {
println!("original image dimensions: {:?}", img.dimensions());
let img = img
.resize_exact(IMG_HEIGHT as u32, IMG_WIDTH as u32, FilterType::Nearest)
.to_rgb();
println!("resized image dimensions: {:?}", img.dimensions());
let mut pixels: Vec<f32> = vec![];
for pixel in img.pixels() {
let tmp = pixel.data;
let tmp = [
(tmp[0] as f32 - 123.0) / 58.395,
(tmp[1] as f32 - 117.0) / 57.12,
(tmp[2] as f32 - 104.0) / 57.375,
];
for e in &tmp {
pixels.push(*e);
}
}
let arr = Array::from_shape_vec((IMG_HEIGHT, IMG_WIDTH, 3), pixels).unwrap();
let arr = arr.permuted_axes([2, 0, 1]);
let arr = Array::from_iter(arr.into_iter().copied().map(|v| v));
Tensor::from(arr)
}
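// Note: the mean/std constants in data_preprocess are the ImageNet channel
// statistics on the 0-255 pixel scale, and permuted_axes([2, 0, 1]) converts
// the image from HWC to the CHW layout the ResNet50 model expects.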
fn output_assert(out_tensor: Tensor, label_class_file: String) {
let output = out_tensor.to_vec::<f32>();
let mut argmax = -1;
let mut max_prob = 0.;
for i in 0..output.len() {
if output[i] > max_prob {
max_prob = output[i];
argmax = i as i32;
}
}
let mut synset: HashMap<i32, String> = HashMap::new();
let mut rdr = csv::ReaderBuilder::new().from_reader(BufReader::new(
File::open(label_class_file.as_str()).unwrap(),
));
for result in rdr.records() {
let record = result.unwrap();
let id: i32 = record[0].parse().unwrap();
let cls = record[1].to_string();
synset.insert(id, cls);
}
println!(
"input image belongs to the class `{}`",
synset
.get(&argmax)
.expect("cannot find the class id for argmax")
);
}
import jinja2
import argparse
import difflib
import datetime
import re
import textwrap
from pathlib import Path
from typing import List
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
JENKINSFILE_TEMPLATE = REPO_ROOT / "ci" / "jenkins" / "Jenkinsfile.j2"
JENKINSFILE = REPO_ROOT / "Jenkinsfile"
class Change:
IMAGES_ONLY = object()
NONE = object()
FULL = object()
data = {
"images": [
{
"name": "ci_arm",
"platform": "ARM",
},
{
"name": "ci_cortexm",
"platform": "CPU",
},
{
"name": "ci_cpu",
"platform": "CPU",
},
{
"name": "ci_gpu",
"platform": "CPU",
},
{
"name": "ci_hexagon",
"platform": "CPU",
},
{
"name": "ci_i386",
"platform": "CPU",
},
{
"name": "ci_lint",
"platform": "CPU",
},
{
"name": "ci_minimal",
"platform": "CPU",
},
{
"name": "ci_riscv",
"platform": "CPU",
},
{
"name": "ci_wasm",
"platform": "CPU",
},
]
}
def lines_without_generated_tag(content):
return [
line for line in content.splitlines(keepends=True) if not line.startswith("// Generated at")
]
def change_type(lines: List[str]) -> Change:
"""
Classify the diff: image-tag-only changes, no changes at all, or a full change
"""
added_images = []
removed_images = []
diff_lines = []
for line in lines[2:]:
if not line.startswith("-") and not line.startswith("+"):
continue
diff_lines.append(line)
if len(diff_lines) == 0:
return Change.NONE
for line in diff_lines:
is_add = line.startswith("+")
line = line.strip().lstrip("+").lstrip("-")
match = re.search(
r"^(ci_[a-zA-Z0-9]+) = \'.*\'$",
line.strip().lstrip("+").lstrip("-"),
flags=re.MULTILINE,
)
if match is None:
return Change.FULL
if is_add:
added_images.append(match.groups()[0])
else:
removed_images.append(match.groups()[0])
if len(added_images) > 0 and added_images == removed_images:
return Change.IMAGES_ONLY
else:
return Change.FULL
if __name__ == "__main__":
help = "Regenerate Jenkinsfile from template"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--force", action="store_true", help="always overwrite timestamp")
parser.add_argument("--check", action="store_true", help="just verify the output didn't change")
args = parser.parse_args()
with open(JENKINSFILE) as f:
content = f.read()
data["generated_time"] = datetime.datetime.now().isoformat()
timestamp_match = re.search(r"^
if not timestamp_match:
raise RuntimeError("Could not find timestamp in Jenkinsfile")
original_timestamp = timestamp_match.groups()[0]
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(REPO_ROOT),
undefined=jinja2.StrictUndefined,
lstrip_blocks=True,
trim_blocks=True,
keep_trailing_newline=True,
)
template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))
new_content = template.render(**data)
diff = [
line
for line in difflib.unified_diff(
lines_without_generated_tag(content), lines_without_generated_tag(new_content)
)
]
change = change_type(diff)
if (not args.force and change == Change.IMAGES_ONLY) or change == Change.NONE:
if change != Change.NONE:
print("Detected only Docker-image name changes, skipping timestamp update")
new_content = new_content.replace(data["generated_time"], original_timestamp)
diff = "".join(diff)
if args.check:
if not diff:
print("Success, the newly generated Jenkinsfile matched the one on disk")
exit(0)
else:
print(
textwrap.dedent(
"""
Newly generated Jenkinsfile did not match the one on disk! If you have made
edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and
regenerate the Jenkinsfile from the template with
python3 -m pip install -r jenkins/requirements.txt
python3 jenkins/generate.py
Diffed changes:
"""
).strip()
)
print(diff)
exit(1)
else:
with open(JENKINSFILE, "w") as f:
f.write(new_content)
if not diff:
print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made")
else:
print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:")
print(diff)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package to enable testing of CI scripts"""
from . import github_skipped_tests_comment, github_pr_comment, github_tag_teams, github_docs_comment
import argparse
import re
import os
import json
import textwrap
from dataclasses import dataclass
from typing import Any, List, Callable
from git_utils import GitHubRepo, parse_remote, git
from cmd_utils import init_log, tags_from_title
GITHUB_USERNAME_REGEX = re.compile(r"(@[a-zA-Z0-9-]+)", flags=re.MULTILINE)
OK = object()
FAIL = object()
@dataclass
class Check:
check: Callable[[str], Any]
error_fn: Callable[[Any], str]
def non_empty(s: str):
if len(s) == 0:
return FAIL
return OK
def usernames(s: str):
m = GITHUB_USERNAME_REGEX.findall(s)
return m if m else OK
def tags(s: str):
items = tags_from_title(s)
if len(items) == 0:
return FAIL
return OK
def trailing_period(s: str):
if s.endswith("."):
return FAIL
return OK
title_checks = [
Check(check=non_empty, error_fn=lambda d: "PR must have a title but title was empty"),
Check(check=trailing_period, error_fn=lambda d: "PR must not end in a trailing '.'"),
]
body_checks = [
Check(check=non_empty, error_fn=lambda d: "PR must have a body but body was empty"),
]
def run_checks(checks: List[Check], s: str, name: str) -> bool:
print(f"Running checks for {name}")
print(textwrap.indent(s, prefix=" "))
passed = True
print(" Checks:")
for i, check in enumerate(checks):
result = check.check(s)
if result == OK:
print(f" [{i+1}] {check.check.__name__}: PASSED")
else:
passed = False
msg = check.error_fn(result)
print(f" [{i+1}] {check.check.__name__}: FAILED: {msg}")
return passed
if __name__ == "__main__":
init_log()
help = "Check a PR's title and body for conformance to guidelines"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-data", help="(testing) PR data to use instead of fetching from GitHub"
)
args = parser.parse_args()
try:
pr = int(args.pr)
except ValueError:
print(f"PR was not a number |
: {args.pr}")
exit(0)
if args.pr_data:
pr = json.loads(args.pr_data)
else:
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
body = "" if pr["body"] is None else pr["body"].strip()
title = "" if pr["title"] is None else pr["title"].strip()
title_passed = run_checks(checks=title_checks, s=title, name="PR title")
print("")
body_passed = run_checks(checks=body_checks, s=body, name="PR body")
if title_passed and body_passed:
print("All checks passed!")
exit(0)
else:
print(
"Some checks failed, please review the logs above and edit your PR on GitHub accordingly"
)
exit(1)
import subprocess
import os
import logging