import iris.NetworkModelInitializer
finally:
sys.path.pop(0)
self._iris_lib = iris
def _convertStringToU64Array(strValue):
numBytes = len(strValue)
if numBytes == 0:
return []
numU64 = (numBytes + 7) // 8  # round up to a whole number of 64-bit words
strExt = strValue.ljust(8 * numU64, b"\0")
return struct.unpack("<{}Q".format(numU64), strExt)
iris.iris.convertStringToU64Array = _convertStringToU64Array
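# Worked example for the helper above (illustrative, not part of the transport):
# b"hello" is zero-padded to 8 bytes and unpacked as one little-endian u64, so
# _convertStringToU64Array(b"hello") == (0x6F6C6C6568,)  # 'h' (0x68) is the low byte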
def open(self):
args = ["ninja"]
if self.options.get("verbose"):
args.append("-v")
args.append("run")
env = dict(os.environ)
env["ARMFVP_BIN_PATH"] = str(API_SERVER_DIR / "fvp-hack")
self.proc = subprocess.Popen(
args,
cwd=BUILD_DIR,
env=env,
stdout=subprocess.PIPE,
)
threading.Thread(target=self._fvp_check_stdout, daemon=True).start()
self.iris_port = self._wait_for_fvp()
_LOG.info("IRIS started on port %d", self.iris_port)
NetworkModelInitializer = self._iris_lib.NetworkModelInitializer.NetworkModelInitializer
self._model_init = NetworkModelInitializer(
host="localhost", port=self.iris_port, timeout_in_ms=1000
)
self._model = self._model_init.start()
self._target = self._model.get_target("component.FVP_MPS3_Corstone_SSE_300.cpu0")
self._target.handle_semihost_io()
self._target._stdout = BlockingStream()
self._target._stdin = BlockingStream()
self._model.run(blocking=False, timeout=100)
self._wait_for_semihost_init()
_LOG.info("IRIS semihosting initialized.")
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def _fvp_check_stdout(self):
START_MSG = "Iris server started listening to port"
INIT_MSG = "microTVM Zephyr runtime - running"
for line in self.proc.stdout:
line = str(line, "utf-8")
_LOG.info("%s", line)
start_msg = re.match(START_MSG + r" ([0-9]+)\n", line)
init_msg = re.match(INIT_MSG, line)
if start_msg:
self._queue.put((ZephyrFvpMakeResult.FVP_STARTED, int(start_msg.group(1))))
elif init_msg:
self._queue.put((ZephyrFvpMakeResult.MICROTVM_API_SERVER_INIT, None))
break
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put((ZephyrFvpMakeResult.MAKE_FAILED, None))
self._queue.put((ZephyrFvpMakeResult.EOF, None))
def _wait_for_fvp(self):
"""waiting for the START_MSG to appear on the stdout"""
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("FVP setup timeout.")
if item[0] == ZephyrFvpMakeResult.FVP_STARTED:
return item[1]
if item[0] in [ZephyrFvpMakeResult.MAKE_FAILED, ZephyrFvpMakeResult.EOF]:
raise RuntimeError("FVP setup failed.")
raise ValueError(f"{item} not expected.")
def _wait_for_semihost_init(self):
"""waiting for the INIT_MSG to appear on the stdout"""
while True:
try:
item = self._queue.get(timeout=240)
except Exception:
raise TimeoutError("semihost init timeout.")
if item[0] == ZephyrFvpMakeResult.MICROTVM_API_SERVER_INIT:
return
raise ValueError(f"{item} not expected.")
def close(self):
self._model._shutdown_model()
self._model.client.disconnect(force=True)
parent = psutil.Process(self.proc.pid)
if parent:
for child in parent.children(recursive=True):
child.terminate()
parent.terminate()
def read(self, n, timeout_sec):
return self._target.stdout.read(n, timeout_sec)
def write(self, data, timeout_sec):
self._target.stdin.write(data)
if __name__ == "__main__":
server.main(Handler())
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
static uint8_t g_aot_memory[WORKSPACE_SIZE];
tvm_workspace_t app_workspace;
const unsigned char CMD_WAKEUP[] = "wakeup\n";
const unsigned char CMD_READY[] = "ready\n";
const unsigned char CMD_INIT[] = "init";
const unsigned char CMD_INFER[] = "infer";
size_t TVMPlatformFormatMessage(char* out_buf, size_t out_buf_size_bytes, const char* fmt,
va_list args) {
return vsnprintk(out_buf, out_buf_size_bytes, fmt, args);
}
void TVMLogf(const char* msg, ...) {
char buffer[256];
int size;
va_list args;
va_start(args, msg);
size = vsnprintk(buffer, sizeof(buffer), msg, args);  // bounded, to avoid overflowing buffer
va_end(args);
TVMPlatformWriteSerial(buffer, (uint32_t)size);
}
void TVMPlatformAbort(tvm_crt_error_t error) {
TVMLogf("TVMPlatformAbort: %08x\n", error);
sys_reboot(SYS_REBOOT_COLD);
for (;;)
;
}
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
return StackMemoryManager_Allocate(&app_workspace, num_bytes, out_ptr);
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return StackMemoryManager_Free(&app_workspace, ptr);
}
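/* Note (an assumption based on the stack discipline of TVM's
 * StackMemoryManager): allocations bump a pointer through g_aot_memory, so
 * frees are expected in reverse (LIFO) order of allocation, which matches how
 * the AOT executor releases its workspaces. */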
void timer_expiry_function(struct k_timer* timer_id) { return; }
struct k_timer g_microtvm_timer;
uint32_t g_microtvm_start_time;
int g_microtvm_timer_running = 0;
tvm_crt_error_t TVMPlatformTimerStart() {
if (g_microtvm_timer_running) {
TVMLogf("timer already running");
return kTvmErrorPlatformTimerBadState;
}
k_timer_start(&g_microtvm_timer, TIME_TIL_EXPIRY, TIME_TIL_EXPIRY);
g_microtvm_start_time = k_cycle_get_32();
g_microtvm_timer_running = 1;
return kTvmErrorNoError;
}
tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds) {
if (!g_microtvm_timer_running) {
TVMLogf("timer not running");
return kTvmErrorSystemErrorMask | 2;
}
uint32_t stop_time = k_cycle_get_32();
uint32_t cycles_spent = stop_time - g_microtvm_start_time;
if (stop_time < g_microtvm_start_time) {
cycles_spent = ~((uint32_t)0) - (g_microtvm_start_time - stop_time);
}
uint32_t ns_spent = (uint32_t)k_cyc_to_ns_floor64(cycles_spent);
double hw_clock_res_us = ns_spent / 1000.0;
int32_t time_remaining_ms = k_timer_remaining_get(&g_microtvm_timer);
k_timer_stop(&g_microtvm_timer);
if (time_remaining_ms < 0) {
return kTvmErrorSystemErrorMask | 3;
}
uint32_t num_expiries = k_timer_status_get(&g_microtvm_timer);
uint32_t timer_res_ms = ((num_expiries * MILLIS_TIL_EXPIRY) + time_remaining_ms);
double approx_num_cycles =
(double)k_ticks_to_cyc_floor32(1) * (double)k_ms_to_ticks_ceil32(timer_res_ms);
// If the timer-based estimate spans more than half the 32-bit cycle range,
// the cycle counter may have wrapped more than once; fall back to the coarser
// k_timer measurement. Otherwise use the higher-resolution cycle counter.
if (approx_num_cycles > (0.5 * (~((uint32_t)0)))) {
*elapsed_time_seconds = timer_res_ms / 1000.0;
} else {
*elapsed_time_seconds = hw_clock_res_us / 1e6;
}
g_microtvm_timer_running = 0;
return kTvmErrorNoError;
}
void* TVMBackendAllocWorkspace(int device_type, int device_id, uint64_t nbytes, int dtype_code_hint,
int dtype_bits_hint) {
tvm_crt_error_t err = kTvmErrorNoError;
void* ptr = 0;
DLDevice dev = {device_type, device_id};
assert(nbytes > 0);
err = TVMPlatformMemoryAllocate(nbytes, dev, &ptr);
CHECK_EQ(err, kTvmErrorNoError,
"TVMBackendAllocWorkspace(%d, %d, %" PRIu64 ", %d, %d) -> %" PRId32, de |
vice_type,
device_id, nbytes, dtype_code_hint, dtype_bits_hint, err);
return ptr;
}
int TVMBackendFreeWorkspace(int device_type, int device_id, void* ptr) {
tvm_crt_error_t err = kTvmErrorNoError;
DLDevice dev = {device_type, device_id};
err = TVMPlatformMemoryFree(ptr, dev);
return err;
}
static uint8_t main_rx_buf[128];
static uint8_t g_cmd_buf[128];
static size_t g_cmd_buf_ind;
void TVMInfer() {
struct tvmgen_default_inputs inputs = {
.input_1 = input_data,
};
struct tvmgen_default_outputs outputs = {
.Identity = output_data,
};
StackMemoryManager_Init(&app_workspace, g_aot_memory, WORKSPACE_SIZE);
double elapsed_time = 0;
TVMPlatformTimerStart();
int ret_val = tvmgen_default_run(&inputs, &outputs);
TVMPlatformTimerStop(&elapsed_time);
if (ret_val != 0) {
TVMLogf("Error: %d\n", ret_val);
TVMPlatformAbort(kTvmErrorPlatformCheckFailure);
}
size_t max_ind = -1;
float max_val = -FLT_MAX;
for (size_t i = 0; i < output_data_len; i++) {
if (output_data[i] >= max_val) {
max_ind = i;
max_val = output_data[i];
}
}
TVMLogf("result:%d:%d\n", max_ind, (uint32_t)(elapsed_time * 1000));
}
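/* Illustrative output of TVMInfer() above: an argmax index of 4 and an elapsed
 * time of 0.123 s are reported as "result:4:123\n", i.e.
 * <label index>:<milliseconds>, for a host-side harness to parse. */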
void command_ready(char* command) {
if (strncmp(command, CMD_INIT, CMD_SIZE) == 0) {
TVMPlatformWriteSerial(CMD_WAKEUP, sizeof(CMD_WAKEUP));
} else if (strncmp(command, CMD_INFER, CMD_SIZE) == 0) {
TVMInfer();
} else {
TVMPlatformWriteSerial(CMD_READY, sizeof(CMD_READY));
}
}
void serial_callback(char* message, int len_bytes) {
for (int i = 0; i < len_bytes; i++) {
if (message[i] == CMD_TERMINATOR) {
g_cmd_buf[g_cmd_buf_ind] = (char)0;
command_ready(g_cmd_buf);
g_cmd_buf_ind = 0;
} else {
g_cmd_buf[g_cmd_buf_ind] = message[i];
g_cmd_buf_ind += 1;
}
}
}
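/* Sketch of the command flow (CMD_TERMINATOR is defined elsewhere in this app,
 * presumably '\n' given the newline-terminated replies above): the host sends
 * e.g. "init\n"; bytes accumulate in g_cmd_buf until the terminator arrives,
 * then command_ready() answers with "wakeup\n". */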
void main(void) {
g_cmd_buf_ind = 0;
memset((char*)g_cmd_buf, 0, sizeof(g_cmd_buf));
TVMPlatformUARTInit();
k_timer_init(&g_microtvm_timer, NULL, NULL);
while (true) {
int bytes_read = TVMPlatformUartRxRead(main_rx_buf, sizeof(main_rx_buf));
if (bytes_read > 0) {
serial_callback(main_rx_buf, bytes_read);
}
}
posix_exit(0);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
static const struct device* g_microtvm_uart;
RING_BUF_DECLARE(uart_rx_rbuf, RING_BUF_SIZE_BYTES);
static uint8_t uart_data[8];
void uart_irq_cb(const struct device* dev, void* user_data) {
while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
struct ring_buf* rbuf = (struct ring_buf*)user_data;
if (uart_irq_rx_ready(dev) != 0) {
for (;;) {
int bytes_read = uart_fifo_read(dev, uart_data, sizeof(uart_data));
if (bytes_read < 0) {
TVMPlatformAbort((tvm_crt_error_t)(0xbeef1));
} else if (bytes_read == 0) {
break;
}
int bytes_written = ring_buf_put(rbuf, uart_data, bytes_read);
if (bytes_read != bytes_written) {
TVMPlatformAbort((tvm_crt_error_t)(0xbeef2));
}
}
}
}
}
void uart_rx_init(struct ring_buf* rbuf, const struct device* dev) {
uart_irq_callback_user_data_set(dev, uart_irq_cb, (void*)rbuf);
uart_irq_rx_enable(dev);
}
uint32_t TVMPlatformUartRxRead(uint8_t* data, uint32_t data_size_bytes) {
unsigned int key = irq_lock();
uint32_t bytes_read = ring_buf_get(&uart_rx_rbuf, data, data_size_bytes);
irq_unlock(key);
return bytes_read;
}
uint32_t TVMPlatformWriteSerial(const char* data, uint32_t size) {
for (uint32_t i = 0; i < size; i++) {
uart_poll_out(g_microtvm_uart, data[i]);
}
return size;
}
void TVMPlatformUARTInit() {
g_microtvm_uart = device_get_binding(DT_LABEL(DT_CHOSEN(zephyr_console)));
const struct uart_config config = {.baudrate = 115200,
.parity = UART_CFG_PARITY_NONE,
.stop_bits = UART_CFG_STOP_BITS_1,
.data_bits = UART_CFG_DATA_BITS_8,
.flow_ctrl = UART_CFG_FLOW_CTRL_NONE};
uart_configure(g_microtvm_uart, &config);
uart_rx_init(&uart_rx_rbuf, g_microtvm_uart);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_APPS_MICROTVM_ZEPHYR_AOT_STANDALONE_DEMO_ZEPHYR_UART_H_
#define TVM_APPS_MICROTVM_ZEPHYR_AOT_STANDALONE_DEMO_ZEPHYR_UART_H_
#include <stdint.h>
// Used to read data from the UART.
/*!
* \brief Read Uart Rx buffer.
* \param data Pointer to read data.
* \param data_size_bytes Read request size in bytes.
*
* \return Number of data read in bytes.
*/
uint32_t TVMPlatformUartRxRead(uint8_t* data, uint32_t data_size_bytes);
/*!
* \brief Write data in serial.
* \param data Pointer to data to write.
* \param size Size of data in bytes.
*
* \return Number of write in bytes.
*/
uint32_t TVMPlatformWriteSerial(const char* data, uint32_t size);
/*!
* \brief Initialize Uart.
*/
void TVMPlatformUARTInit();
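/* Usage sketch for this API (illustrative only):
 *
 *   TVMPlatformUARTInit();
 *   uint8_t buf[32];
 *   uint32_t n = TVMPlatformUartRxRead(buf, sizeof(buf));
 *   if (n > 0) {
 *     TVMPlatformWriteSerial((const char*)buf, n);  // echo the received bytes
 *   }
 */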
#endif /* TVM_APPS_MICROTVM_ZEPHYR_AOT_STANDALONE_DEMO_ZEPHYR_UART_H_ */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
*
* SPDX-License-Identifier: Apache-2.0
*/
int32_t stdout_fd;
int32_t stdin_fd;
uint32_t semihost_cmd(uint32_t opcode, void* arg) {
uint32_t ret_val;
__asm__ volatile(
"mov r0, %[opcode]\n\t"
"mov r1, %[arg]\n\t"
"bkpt
"mov %[ret_val], r0"
: [ ret_val ] "=r"(ret_val)
: [ opcode ] "r"(opcode), [ arg ] "r"(arg)
: "r1", "memory");
return ret_val;
}
void init_semihosting() {
struct {
const char* file_name;
uint32_t mode;
uint32_t file_name_len;
} params;
params.file_name = ":tt";
params.mode = 5;
params.file_name_len = 3;
stdout_fd = semihost_cmd(0x01, ¶ms);
params.mode = 0;
stdin_fd = semihost_cmd(0x01, ¶ms);
}
ssize_t semihost_read(uint8_t* data, size_t size) {
struct {
uint32_t file_handle;
const uint8_t* data;
uint32_t size;
} read_req;
read_req.file_handle = stdin_fd;
read_req.data = data;
read_req.size = size;
uint32_t ret_val = semihost_cmd(0x06, &read_req);  // 0x06 = SYS_READ
// SYS_READ returns the number of bytes *not* read, so this yields bytes read.
return size - ret_val;
}
ssize_t semihost_write(void* unused_context, const uint8_t* data, size_t size) {
struct {
uint32_t file_handle;
const uint8_t* data;
uint32_t size;
} write_req;
write_req.file_handle = stdout_fd;
write_req.data = data;
write_req.size = size;
uint32_t ret_val = semihost_cmd(0x05, &write_req);  // 0x05 = SYS_WRITE
return size - ret_val;  // SYS_WRITE returns the number of bytes *not* written
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef TVM_APPS_MICROTVM_ZEPHYR_HOST_DRIVEN_SEMIHOST_H_
#define TVM_APPS_MICROTVM_ZEPHYR_HOST_DRIVEN_SEMIHOST_H_
#include <kernel.h>
#include <unistd.h>
#include <zephyr.h>
void init_semihosting();
ssize_t semihost_read(uint8_t* data, size_t size);
ssize_t semihost_write(void* unused_context, const uint8_t* data, size_t size);
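/* Usage sketch (assumes the target runs under a semihosting-aware host such as
 * an FVP model):
 *
 *   init_semihosting();
 *   semihost_write(NULL, (const uint8_t*)"hello\n", 6);
 */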
#endif /* TVM_APPS_MICROTVM_ZEPHYR_HOST_DRIVEN_SEMIHOST_H_ */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* This is a sample Zephyr-based application that contains the logic
* needed to control a microTVM-based model via the UART. This is only
* intended to be a demonstration, since typically you will want to incorporate
* this logic into your own application.
*/
static const struct device* tvm_uart;
static const struct device* led0_pin;
static size_t g_num_bytes_requested = 0;
static size_t g_num_bytes_written = 0;
static size_t g_num_bytes_in_rx_buffer = 0;
ssize_t uart_write(void* unused_context, const uint8_t* data, size_t size) {
gpio_pin_set(led0_pin, LED0_PIN, 1);
g_num_bytes_requested += size;
for (size_t i = 0; i < size; i++) {
uart_poll_out(tvm_uart, data[i]);
g_num_bytes_written++;
}
gpio_pin_set(led0_pin, LED0_PIN, 0);
return size;
}
ssize_t serial_write(void* unused_context, const uint8_t* data, size_t size) {
#ifdef FVP
// FVP builds write through the semihosting console.
return semihost_write(unused_context, data, size);
#else
// Hardware builds write through the UART.
return uart_write(unused_context, data, size);
#endif
}
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t* esf) {
gpio_pin_set(led0_pin, LED0_PIN, 1);
for (;;)
;
}
size_t TVMPlatformFormatMessage(char* out_buf, size_t out_buf_size_bytes, const char* fmt,
va_list args) {
return vsnprintk(out_buf, out_buf_size_bytes, fmt, args);
}
void TVMPlatformAbort(tvm_crt_error_t error) {
TVMLogf("TVMError: 0x%x", error);
sys_reboot(SYS_REBOOT_COLD);
gpio_pin_set(led0_pin, LED0_PIN, 1);
for (;;)
;
}
tvm_crt_error_t TVMPlatformGenerateRandom(uint8_t* buffer, size_t num_bytes) {
uint32_t random;
size_t num_full_blocks = num_bytes / sizeof(random);
for (int i = 0; i < num_full_blocks; ++i) {
random = sys_rand32_get();
memcpy(&buffer[i * sizeof(random)], &random, sizeof(random));
}
size_t num_tail_bytes = num_bytes % sizeof(random);
if (num_tail_bytes > 0) {
random = sys_rand32_get();
memcpy(&buffer[num_bytes - num_tail_bytes], &random, num_tail_bytes);
}
return kTvmErrorNoError;
}
K_HEAP_DEFINE(tvm_heap, HEAP_SIZE_BYTES);
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
*out_ptr = k_heap_alloc(&tvm_heap, num_bytes, K_NO_WAIT);
return (*out_ptr == NULL) ? kTvmErrorPlatformNoMemory : kTvmErrorNoError;
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
k_heap_free(&tvm_heap, ptr);
return kTvmErrorNoError;
}
volatile timing_t g_microtvm_start_time, g_microtvm_end_time;
int g_microtvm_timer_running = 0;
tvm_crt_error_t TVMPlatformTimerStart() {
if (g_microtvm_timer_running) {
TVMLogf("timer already running");
return kTvmErrorPlatformTimerBadState;
}
gpio_pin_set(led0_pin, LED0_PIN, 1);
g_microtvm_start_time = timing_counter_get();
g_microtvm_timer_running = 1;
return kTvmErrorNoError;
}
tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds) {
if (!g_microtvm_timer_running) {
TVMLogf("timer not running");
return kTvmErrorSystemErrorMask | 2;
}
gpio_pin_set(led0_pin, LED0_PIN, 0);
g_microtvm_end_time = timing_counter_get();
uint64_t cycles = timing_cycles_get(&g_microtvm_start_time, &g_microtvm_end_time);
uint64_t ns_spent = timing_cycles_to_ns(cycles);
*elapsed_time_seconds = ns_spent / (double)1e9;
g_microtvm_timer_running = 0;
return kTvmErrorNoError;
}
RING_BUF_ITEM_DECLARE_SIZE(uart_rx_rbuf, RING_BUF_SIZE_BYTES);
void uart_irq_cb(const struct device* dev, void* user_data) {
uart_irq_update(dev);
if (uart_irq_is_pending(dev)) {
struct ring_buf* rbuf = (struct ring_buf*)user_data;
if (uart_irq_rx_ready(dev) != 0) {
uint8_t* data;
uint32_t size;
size = ring_buf_put_claim(rbuf, &data, RING_BUF_SIZE_BYTES);
int rx_size = uart_fifo_read(dev, data, size);
g_num_bytes_in_rx_buffer += rx_size;
if (g_num_bytes_in_rx_buffer > RING_BUF_SIZE_BYTES) {
TVMPlatformAbort((tvm_crt_error_t)0xbeef3);
}
if (rx_size < 0) {
TVMPlatformAbort((tvm_crt_error_t)0xbeef1);
}
int err = ring_buf_put_finish(rbuf, rx_size);
if (err != 0) {
TVMPlatformAbort((tvm_crt_error_t)0xbeef2);
}
}
}
}
void uart_rx_init(struct ring_buf* rbuf, const struct device* dev) {
uart_irq_callback_user_data_set(dev, uart_irq_cb, (void*)rbuf);
uart_irq_rx_enable(dev);
}
extern void __stdout_hook_install(int (*hook)(int));
void main(void) {
int ret;
led0_pin = device_get_binding(LED0);
if (led0_pin == NULL) {
for (;;)
;
}
ret = gpio_pin_configure(led0_pin, LED0_PIN, GPIO_OUTPUT_ACTIVE | LED0_FLAGS);
if (ret < 0) {
TVMPlatformAbort((tvm_crt_error_t)0xbeef4);
}
gpio_pin_set(led0_pin, LED0_PIN, 1);
tvm_uart = device_get_binding(DT_LABEL(DT_CHOSEN(zephyr_console)));
uart_rx_init(&uart_rx_rbuf, tvm_uart);
timing_init();
timing_start();
init_semihosting();
for (int i = 0; i < 100; ++i) {
uart_write(NULL, "dummy log...\n", 13);
}
uart_write(NULL, "microTVM Zephyr runtime - running\n", 34);
microtvm_rpc_server_t server = MicroTVMRpcServerInit(serial_write, NULL);
TVMLogf("microTVM Zephyr runtime - running");
gpio_pin_set(led0_pin, LED0_PIN, 0);
while (true) {
#ifdef FVP
// FVP builds: read bytes from the semihosting console.
uint8_t data[128];
uint32_t bytes_read = semihost_read(data, 128);
#else
// Hardware builds: claim bytes directly from the UART ring buffer.
uint8_t* data;
unsigned int key = irq_lock();
uint32_t bytes_read = ring_buf_get_claim(&uart_rx_rbuf, &data, RING_BUF_SIZE_BYTES);
#endif
if (bytes_read > 0) {
uint8_t* ptr = data;
size_t bytes_remaining = bytes_read;
while (bytes_remaining > 0) {
// Pass the received bytes through the RPC server.
tvm_crt_error_t err = MicroTVMRpcServerLoop(server, &ptr, &bytes_remaining);
if (err != kTvmErrorNoError && err != kTvmErrorFramingShortPacket) {
TVMPlatformAbort(err);
}
#ifdef FVP
}
}
#else
g_num_bytes_in_rx_buffer -= bytes_read;
if (g_num_bytes_written != 0 || g_num_bytes_requested != 0) {
if (g_num_bytes_written != g_num_bytes_requested) {
TVMPlatformAbort((tvm_crt_error_t)0xbeef5);
}
g_num_bytes_written = 0;
g_num_bytes_requested = 0;
}
}
int err = ring_buf_get_finish(&uart_rx_rbuf, bytes_read);
if (err != 0) {
TVMPlatformAbort((tvm_crt_error_t)0xbeef6);
}
}
irq_unlock(key);
#endif
}
posix_exit(0);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file include/crt_config.h
* \brief CRT configuration for demo app.
*/
#ifndef TVM_RUNTIME_CRT_CONFIG_H_
#define TVM_RUNTIME_CRT_CONFIG_H_
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
/*! Support low-level debugging in MISRA-C runtime */
#define TVM_CRT_DEBUG 0
/*! Maximum supported dimension in NDArray */
#define TVM_CRT_MAX_NDIM 6
/*! Maximum supported arguments in generated functions */
#define TVM_CRT_MAX_ARGS 10
/*! Maximum supported string length in dltype, e.g. "int8", "int16", "float32"
*/
#define TVM_CRT_MAX_STRLEN_DLTYPE 10
/*! Maximum supported string length in function names */
#define TVM_CRT_MAX_STRLEN_FUNCTION_NAME 120
/*! Maximum supported string length in parameter names */
#define TVM_CRT_MAX_STRLEN_PARAM_NAME 80
/*! Maximum number of registered modules. */
#define TVM_CRT_MAX_REGISTERED_MODULES 2
/*! Size of the global function registry, in bytes. */
#define TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES 512
/*! Maximum packet size, in bytes, including the length header. */
#define TVM_CRT_MAX_PACKET_SIZE_BYTES 512
#endif // TVM_RUNTIME_CRT_CONFIG_H_
import os
import pathlib
import sys
import numpy as np
def create_file(name, prefix, tensor_name, tensor_data, output_path):
"""
This function generates a header file containing the data from the numpy array provided.
"""
file_path = pathlib.Path(f"{output_path}/" + name).resolve()
raw_path = file_path.with_suffix(".c").resolve()
with open(raw_path, "w") as header_file:
header_file.write(
"
"
f"const size_t {tensor_name}_len = {tensor_data.size};\n"
f"{prefix} float {tensor_name}_storage[] = "
)
header_file.write("{")
for i in np.ndindex(tensor_data.shape):
header_file.write(f"{tensor_data[i]}, ")
header_file.write("};\n\n")
def create_files(input_file, output_dir):
"""
This function generates C files for the input and output arrays required to run inferences
"""
os.makedirs(output_dir, exist_ok=True)
input_data = np.loadtxt(input_file)
create_file("inputs", "const", "input", input_data, output_dir)
output_data = np.zeros([12], np.float32)
create_file(
"outputs",
"",
"output",
output_data,
output_dir,
)
if __name__ == "__main__":
create_files(sys.argv[1], sys.argv[2])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import sys
def create_labels_header(labels_file, output_path):
"""
This function generates a header file containing the ImageNet labels as an array of strings
"""
labels_path = pathlib.Path(labels_file).resolve()
file_path = pathlib.Path(f"{output_path}/labels.c").resolve()
with open(labels_path) as f:
labels = f.readlines()
with open(file_path, "w") as header_file:
header_file.write(f"char* labels[] = {{")
for _, label in enumerate(labels):
header_file.write(f'"{label.rstrip()}",')
header_file.write("};\n")
if __name__ == "__main__":
create_labels_header(sys.argv[1], sys.argv[2])
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern char* labels[12];
extern float input_storage[490];
extern float output_storage[12];
extern const size_t output_len;
static uint8_t __attribute__((aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
g_crt_workspace[TVMGEN_DEFAULT_WORKSPACE_SIZE];
tvm_workspace_t app_workspace;
void TVMLogf(const char* msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stderr, msg, args);
va_end(args);
}
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
fprintf(stderr, "TVMPlatformAbort: %d\n", error_code);
exit(-1);
}
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
uintptr_t ret = StackMemoryManager_Allocate(&app_workspace, num_bytes, out_ptr);
return ret;
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return StackMemoryManager_Free(&app_workspace, ptr);
}
void main(void) {
StackMemoryManager_Init(&app_workspace, g_crt_workspace, TVMGEN_DEFAULT_WORKSPACE_SIZE);
struct tvmgen_default_inputs inputs = {.input = input_storage};
struct tvmgen_default_outputs outputs = {.Identity = output_storage};
if (tvmgen_default_run(&inputs, &outputs) != 0) {
printk("Model run failed\n");
exit(-1);
}
float max_value = 0.0;
size_t max_index = -1;
for (unsigned int i = 0; i < output_len; ++i) {
if (output_storage[i] > max_value) {
max_value = output_storage[i];
max_index = i;
}
}
printk("The word is '%s'!\n", labels[max_index]);
exit(0);
}
"""Test script for tvm torch module""" |
import tempfile |
import numpy as np |
import torch |
import torch.nn |
import tvm
from tvm.target.target |
import Target |
import tvm.testing
from tvm.contrib.torch |
import as_torch
from tvm.script |
import tir as T
@as_torch
def matmul(M: int, N: int, K: int, dtype: str):
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [M, K], dtype=dtype)
B = T.match_buffer(b, [N, K], dtype=dtype)
C = T.match_buffer(c, [M, N], dtype=dtype)
for i, j, k in T.grid(M, N, K):
with T.block():
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
return main
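# Note: B is indexed as B[vj, vk], so this prim_func computes C = A @ B^T;
# the matmul test below checks against np.matmul(s1, np.transpose(s2)) accordingly.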
@as_torch
@tvm.script.ir_module
class ModuleGPU:
@T.prim_func
def main(A: T.Buffer[8, "float32"], B: T.Buffer[8, "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i_0 in T.thread_binding(2, thread="blockIdx.x"):
for i_2 in T.thread_binding(2, thread="threadIdx.x"):
for i_1 in T.serial(2):
with T.block("B"):
vi = T.axis.spatial(8, i_0 * 4 + i_1 * 2 + i_2)
T.reads(A[vi])
T.writes(B[vi])
B[vi] = A[vi] + T.float32(1)
@as_torch
@T.prim_func
def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
with T.block():
for i, j in T.grid(128, 128):
with T.block("s1"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
B[vi, vj] = A[vi, vj] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block("s2"):
vi, vj = T.axis.remap("SS", [i, j])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
@as_torch
@tvm.script.ir_module
class MyModule:
@T.prim_func
def main(a: T.handle, b: T.handle):
T.func_attr({"global_symbol": "main", "tir.noalia |
s": True})
A = T.match_buffer(a, (8,), dtype="float32")
B = T.match_buffer(b, (8,), dtype="float32")
for i in range(8):
with T.block("B"):
vi = T.axis.spatial(8, i)
B[vi] = A[vi] + 1.0
@as_torch
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i, ko in T.grid(128, 4):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("B"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, ko * 32 + ki)
T.reads([B[vi], A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@as_torch
def elementwise_with_root(M: int, N: int, dtype: str):
@T.prim_func
def f(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [M, N])
B = T.match_buffer(b, [M, N])
C = T.match_buffer(c, [M, N])
with T.block():
for i, j in T.grid(M, N):
with T.block("s1"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + T.float32(1)
for i, j in T.grid(M, N):
with T.block("s2"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
return f
class MinuesOnes(torch.nn.Module):
def __init__(self):
super(MinuesOnes, self).__init__()
self.engine = MyModule
def forward(self, *input):
self.engine.forward(*input)
return input[-1] - 1
def test_tvmscript_torch_matmul():
s1 = np.random.rand(128, 128).astype("float32")
s2 = np.random.rand(128, 128).astype("float32")
s3 = np.random.rand(128, 128).astype("float32")
q1 = torch.from_numpy(s1)
q2 = torch.from_numpy(s2)
q3 = torch.from_numpy(s3)
numpy_result = np.matmul(s1, np.transpose(s2))
nn_module = matmul(128, 128, 128, "float32")
nn_module(q1, q2, q3)
tvm.testing.assert_allclose(q3.numpy(), numpy_result, atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_decorator():
q1 = torch.arange(8).type(torch.float32)
q2 = torch.zeros((8,), dtype=torch.float32)
MyModule(q1, q2)
tvm.testing.assert_allclose(q2.numpy(), (q1 + 1).numpy(), atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_gpu():
cuda0 = torch.device("cuda:0")
q1 = torch.arange(8, device=cuda0).type(torch.float32)
q2 = torch.zeros((8,), dtype=torch.float32, device=cuda0)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(ModuleGPU, tmp.name)
loaded_mod = torch.load(tmp.name)
loaded_mod(q1, q2)
tvm.testing.assert_allclose(q2.cpu().numpy(), (q1 + 1).cpu().numpy(), atol=1e-5, rtol=1e-5)
def test_torch_with_tvmscript():
ref_result = np.arange(8).astype("float32")
q1 = torch.arange(8).type(torch.float32)
q2 = torch.zeros((8,), dtype=torch.float32)
nn_module = MinuesOnes()
ret = nn_module.forward(q1, q2)
tvm.testing.assert_allclose(ret.numpy(), ref_result, atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_func_with_part_access_region():
a1 = torch.rand(128, 128)
a2 = torch.zeros(128, 128)
a3 = torch.zeros(128, 128)
result = a1 + 2
func_with_part_access_region.tune()
func_with_part_access_region(a1, a2, a3)
tvm.testing.assert_allclose(a3.numpy(), result.numpy(), atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_loop_split():
x = torch.rand(128, 128).cuda()
y = torch.zeros(128).cuda()
result = torch.sum(x.cpu(), dim=1).numpy()
loop_split.tune(
"nvidia/geforce-rtx-3070",
max_trials_global=128,
strategy="replay-trace",
)
loop_split(x, y)
tvm.testing.assert_allclose(y.cpu().numpy(), result, atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_elementwise_with_root():
a1 = torch.rand(128, 128)
a2 = torch.zeros(128, 128)
a3 = torch.zeros(128, 128)
result = a1 + 2
func = elementwise_with_root(128, 128, "float32")
func.tune(
max_trials_global=128,
strategy="replay-trace",
)
func(a1, a2, a3)
tvm.testing.assert_allclose(a3.numpy(), result.numpy(), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_tvmscript_torch_matmul()
test_tvmscript_torch_decorator()
test_tvmscript_torch_gpu()
test_torch_with_tvmscript()
test_tvmscript_torch_func_with_part_access_region()
test_tvmscript_torch_loop_split()
test_tvmscript_torch_elementwise_with_root()
"""Test script for boolean tensor support""" |
import tempfile |
import torch |
import tvm |
import tvm.testing
from tvm.contrib.torch |
import as_torch, optimize_torch
from tvm.script |
import tir as T
def negate(x):
return x.logical_not()
def sum_up_tensor(x):
return x.size(dim=0) - torch.sum(x.int())
def tensor_boolean_operation(x):
arr1 = (x + 0.3).floor().bool()
arr2 = (~((x + 0.7).int().bool())).bool()
ret = ((arr1 & arr2).byte() + 0.5).half()
return ~(ret.bool())
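# Walkthrough for x in [0, 1): arr1 is True iff x >= 0.7, arr2 is True iff
# x < 0.3, so arr1 & arr2 is all False and the function returns all False;
# the test below only asserts that eager and optimized results agree.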
def test_bool_tensor_negate():
input = torch.ones(1, dtype=torch.bool)
optimized_negate = optimize_torch(
negate,
input,
)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(optimized_negate, tmp.name)
loaded_mod = torch.load(tmp.name)
output = loaded_mod(negate(input))
tvm.testing.assert_allclose(input.numpy(), output.numpy(), atol=1e-5, rtol=1e-5)
def test_sum_up_tensor():
x = torch.randint(0, 2, (16,))
y = x.bool()
optimized_func = optimize_torch(
sum_up_tensor,
(y,),
)
ret1 = (x[x == 0]).size(dim=0)
ret2 = optimized_func(y).numpy()
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
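# ret1 counts the zero entries of x directly; sum_up_tensor computes the same
# quantity as len(y) - sum(y) over the boolean tensor, so the two must agree.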
def test_tensor_boolean_operation():
input = torch.rand(200)
model = optimize_torch(
tensor_boolean_operation,
input,
)
ret1 = tensor_boolean_operation(input)
ret2 = model(input)
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
@as_torch
@T.prim_func
def negate_tvmscript(
X: T.Buffer[(8, 8), "bool"],
Y: T.Buffer[(8, 8), "float32"],
Z: T.Buffer[(8, 8), "bool"],
U: T.Buffer[(8, 8), "float32"],
) -> None:
for i, j in T.grid(8, 8):
with T.block():
if Y[i, j] > 0.0:
Z[i, j] = X[i, j]
U[i, j] = Y[i, j]
else:
Z[i, j] = not X[i, j]
U[i, j] = 0.0 - Y[i, j]
def negate_vanila(x, y):
z = torch.zeros(8, 8).bool()
for i in range(8):
for j in range(8):
if y[i, j] > 0:
z[i, j] = x[i, j]
else:
z[i, j] = ~x[i, j]
return z
def test_tvmscript_torch_decorator():
q1 = (torch.rand(8, 8) + 0.5).int().bool()
q2 = torch.rand(8, 8) - 0.5
q3 = torch.zeros(8, 8).bool()
q4 = torch.zeros(8, 8)
std1 = negate_vanila(q1, q2)
std2 = torch.abs(q2)
negate_tvmscript(q1, q2, q3, q4)
tvm.testing.assert_allclose(std1.numpy(), q3.numpy(), atol=1e-5, rtol=1e-5)
tvm.testing.assert_allclose(std2.numpy(), q4.numpy(), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_tvmscript_torch_decorator()
test_bool_tensor_negate()
test_sum_up_tensor()
test_tensor_boolean_operation()
"""Test script for tvm torch module""" |
import tempfile |
import torch
from torch.utils |
import benchmark
from torchvision.models |
import resnet18 |
import tvm |
import tvm.testing
from tvm.contrib.torch |
import optimize_torch
from tvm.meta_schedule |
import TuneConfig
def test_matmul_tuning_relay():
def matmul(x, w):
return torch.matmul(x, w)
x = torch.randn(15, 20)
w = torch.randn(20, 30)
example_inputs = (x, w)
rt_mod = optimize_torch(matmul, example_inputs)
torch_answer = torch.matmul(x, w).numpy()
tvm_answer = rt_mod(x, w).numpy()
tvm.testing.assert_allclose(torch_answer, tvm_answer, atol=1e-5, rtol=1e-5)
class InnerModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 20, 5)
def forward(self, x):
return torch.nn.functional.relu(self.conv(x))
class SimpleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(20, 20, 5)
self.relu = InnerModel()
def forward(self, x):
x = self.relu(x)
return torch.nn.functional.relu(self.conv(x))
def test_nested_module():
simple_module = SimpleModel()
example_input = torch.randn(20, 1, 10, 10)
optimized_module = optimize_torch(simple_module, example_input)
ret1 = simple_module(example_input).detach().numpy()
ret2 = optimized_module(example_input).detach().numpy()
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
def test_save_load_function():
def foo(x):
return 2 * x + 1
example_input = torch.rand(3)
opt_foo = optimize_torch(foo, example_input)
ret1 = opt_foo(example_input)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(opt_foo, tmp.name)
loaded_mod = torch.load(tmp.name)
ret2 = loaded_mod(example_input)
tvm.testing.assert_allclose(ret1.numpy(), ret2.numpy(), atol=1e-5, rtol=1e-5)
class MyResNet18(torch.nn.Module):
def __init__(self, config, target=None):
super(MyResNet18, self).__init__()
self.means = torch.nn.Parameter(
torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
).cuda()
self.resnet = optimize_torch(resnet18(), [torch.rand(1, 3, 224, 224)], config, target)
def forward(self, input):
return self.resnet(input - self.means)
class JitModule(torch.nn.Module):
def __init__(self):
super(JitModule, self).__init__()
self.means = torch.nn.Parameter(
torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
).cuda()
self.resnet = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval()))
def forward(self, input):
return self.resnet(input - self.means)
config = TuneConfig(
strategy="evolutionary",
num_trials_per_iter=4,
max_trials_per_task=8,
max_trials_global=16,
)
if torch.cuda.is_available():
target_cuda = "nvidia/geforce-rtx-3070"
meta_module_resnet18 = MyResNet18(config, target_cuda)
jit_module_resnet18 = JitModule()
def compare_optimize_resnet18_to_torchscript():
results = []
for i in range(20):
test_input = torch.rand(1, 3, 224, 224).half().cuda()
sub_label = f"[test {i}]"
results.append(
benchmark.Timer(
stmt="meta_module_resnet18(test_input)",
setup="from __main__ |
import meta_module_resnet18",
globals={"test_input": test_input},
sub_label=sub_label,
description="tuning by meta",
).blocked_autorange()
)
results.append(
benchmark.Timer(
stmt="jit_module_resnet18(test_input)",
setup="from __main__ |
import jit_module_resnet18",
globals={"test_input": test_input},
sub_label=sub_label,
description="tuning by jit",
).blocked_autorange()
)
compare = benchmark.Compare(results)
compare.print()
if __name__ == "__main__":
test_matmul_tuning_relay()
test_nested_module()
test_save_load_function()
if torch.cuda.is_available():
compare_optimize_resnet18_to_torchscript()
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
import tvm
from tvm.contrib.torch import compile
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor):
return x * x
model = Model()
x = torch.rand([1, 3, 224, 224])
model_jit = torch.jit.trace(model, x)
print(model_jit.graph)
print("run torchscript...")
for i in range(20):
t = time.time()
model_jit(x)
print(time.time() - t)
option = {
"input_infos": [
("x", (1, 3, 224, 224)),
],
"default_dtype": "float16",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 1, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "llvm",
"device": tvm.cpu(),
}
pytorch_tvm_module = compile(model_jit, option)
torch.jit.script(pytorch_tvm_module).save("model_tvm.pt")
print("Run PyTorch...")
for i in range(20):
t = time.time()
outputs = pytorch_tvm_module.forward([x.cpu()])
print(1000 * (time.time() - t))
print(outputs[0].shape)
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
from torchvision.models import resnet50
import tvm
from tvm.contrib.torch import compile
model = resnet50().half().cuda()
x = torch.rand([1, 3, 224, 224]).half().cuda()
model_jit = torch.jit.trace(model, x)
print(model_jit.graph)
print("run torchscript...")
for i in range(20):
t = time.time()
model_jit(x)
torch.cuda.synchronize()
print(time.time() - t)
option = {
"input_infos": [
("x", (1, 3, 224, 224)),
],
"default_dtype": "float16",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 1, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "cuda",
"device": tvm.cuda(0),
}
pytorch_tvm_module = compile(model_jit, option)
torch.jit.script(pytorch_tvm_module).save("model_tvm.pt")
print("Run PyTorch...")
for i in range(20):
t = time.time()
outputs = pytorch_tvm_module.forward([x])
torch.cuda.synchronize()
print(1000 * (time.time() - t))
print(outputs[0].shape)
"""Test script for torch module""" |
import tempfile |
import os |
import logging |
import torch |
import numpy as np |
import tvm |
import tvm.testing
from tvm |
import te, relay |
import tvm.contrib.torch
from tvm.contrib |
import graph_runtime
TVM_ASSETS = ["mod.so", "graph.json", "params"]
def test_use_pt_graph_module():
"""main test function"""
def build_export_graph(device):
"""relay build & export graph"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
pt_device = torch.device(device)
if pt_device.type == "cuda":
target = "cuda"
ctx = tvm.cuda(pt_device.index)
else:
target = "llvm"
ctx = tvm.cpu(0)
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), target=target, params=params)
mod = graph_runtime.create(graph, lib, device=ctx)
mod.set_input(**params)
mod.set_input(x=x_data)
mod.run()
res = mod.get_output(0).asnumpy()
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)
export_dir = tempfile.mkdtemp("tvm_export")
lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "w") as fout:
fout.write(graph)
with open(os.path.join(export_dir, TVM_ASSETS[2]), "wb") as fout:
fout.write(relay.save_param_dict(params))
return export_dir
def test_pt_run(device, trace=True, to_device=None):
"""test add lib with Pytorch wrapper"""
print("\n
export_dir = build_export_graph(device)
engine = tvm.contrib.torch.GraphModule(num_inputs=2, num_outputs=1).to(device)
x = np.random.rand(10, 5).astype("float32")
y = np.random.rand(1, 5).astype("float32")
expect = np.exp(y + x)
def get_inputs_by_device(device):
inps = [torch.Tensor(x), torch.Tensor(y)]
if device == "cpu":
return inps
else:
device_type, device_id = device.split(":")
assert device_type == "cuda"
return [inp.cuda(int(device_id)) for inp in inps]
assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
engine.init((x.shape, y.shape), *assets)
outputs = engine.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
if trace:
print("\n
scripted = torch.jit.script(engine)
scripted_dir = tempfile.mkdtemp("scripted")
scripted_path = os.path.join(scripted_dir, "model.pt")
scripted.save(scripted_path)
loaded = torch.jit.load(scripted_path)
outputs = loaded.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del scripted
del loaded
if to_device:
print(
"\n################ Test move from [{}] to [{}] #################".format(
device, to_device
)
)
engine = engine.to(to_device)
outputs = engine.forward(get_inputs_by_device(to_device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del engine
test_pt_run(device="cuda:0", trace=True, to_device="cuda:1")
test_pt_run(device="cpu", trace=True)
if __name__ == "__main__":
test_use_pt_graph_module()
"""Test script for torch module""" |
import os |
import torch |
import time |
import numpy as np |
import tvm |
import tvm.testing |
import tempfile
from tvm.contrib.torch |
import PyTorchTVMModule, compile |
class Model(torch.nn.Module):
def forward(self, x, y):
return torch.matmul(x, y.softmax(1))
model = Model()
model.cuda().half()
x = torch.rand([1280, 2464, 4]).cuda().half()
y = torch.rand([1280, 4, 1]).cuda().half()
for i in range(20):
t = time.time()
o = model(x, y)
torch.cuda.synchronize()
print(1000 * (time.time() - t))
print(o.shape)
model_jit = torch.jit.script(model)
print(model_jit.graph)
input_shapes = [("x", list(x.shape)), ("y", list(y.shape))]
dtype = "float16"
export_dir = tempfile.mkdtemp("pytorch_compiled")
print("tmp export_dir:", export_dir)
mod = PyTorchTVMModule()
print("Converting...")
mod.from_pytorch(model_jit, input_shapes, dtype)
log_file = os.path.join(export_dir, "tuning.log")
if not os.path.exists(log_file):
print("Tuning...")
mod.tune_tvm(log_file=log_file, n_trial=20)
print("Building...")
tvm_mod = mod.build_tvm(export_dir)
pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1)
print("Run TVM...")
tvm_x = tvm.nd.array(x.cpu().numpy().astype(dtype), device=tvm.gpu(0))
tvm_y = tvm.nd.array(y.cpu().numpy().astype(dtype), device=tvm.gpu(0))
for i in range(20):
t = time.time()
tvm_mod.run(x=tvm_x, y=tvm_y)
print(1000 * (time.time() - t))
tvm_output = tvm_mod.get_output(0)
print(tvm_output.shape)
print("Run PyTorch...")
for i in range(20):
t = time.time()
outputs = pytorch_mod.forward([x, y])
torch.cuda.synchronize()
print(1000 * (time.time() - t))
print(outputs[0].shape)
class EnsembleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.jit.script(pytorch_mod)
def forward(self, x, y, z) -> torch.Tensor:
if x > 1:
out = self.layer(y, z)[0]
else:
out = torch.ones([1280, 2464, 1])
return out
print("Exporting...")
scripted = torch.jit.script(EnsembleModel())
print(scripted.graph)
scripted_path = os.path.join(export_dir, "model_tvm.pt")
scripted.save(scripted_path)
"""Test script for torch vm module""" |
import tempfile |
import os |
import logging |
import torch |
import numpy as np |
import tvm
from tvm.contrib.torch.pytorch_tvm |
import TVM_ASSETS |
import tvm.testing
from tvm |
import te, relay |
import tvm.contrib.torch
from tvm.contrib |
import graph_runtime
TVM_ASSETS = ["mod.so", "code.ro"]
def test_use_pt_vm_module():
"""main test function"""
def build_export_vm(device):
"""relay build & export graph"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
pt_device = torch.device(device)
if pt_device.type == "cuda":
target = "cuda"
ctx = tvm.cuda(pt_device.index)
else:
target = "llvm"
ctx = tvm.cpu(0)
exe = relay.vm.compile(tvm.IRModule.from_expr(func), target=target, params={})
code, lib = exe.save()
export_dir = tempfile.mkdtemp("tvm_export")
lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "wb") as fout:
fout.write(code)
vm = tvm.runtime.vm.VirtualMachine(exe, ctx)
res = vm.run(x_data, y_data)
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res.numpy(), ref_res, atol=1e-5, rtol=1e-5)
return export_dir
def test_pt_run(device, trace=True, to_device=None, inp_on_cuda=False):
"""test add lib with Pytorch wrapper"""
print("\n
export_dir = build_export_vm(device)
engine = tvm.contrib.torch.VMModule(num_inputs=2, num_outputs=1).to(device)
x = np.random.rand(10, 5).astype("float32")
y = np.random.rand(1, 5).astype("float32")
expect = np.exp(y + x)
def get_inputs_by_device(device):
inps = [torch.Tensor(x), torch.Tensor(y)]
if device == "cpu":
return inps
else:
device_type, device_id = device.split(":")
assert device_type == "cuda"
return [inp.cuda(int(device_id)) for inp in inps]
assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
engine.init((x.shape, y.shape), *assets)
outputs = engine.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
if trace:
print("\n
scripted = torch.jit.script(engine)
scripted_dir = tempfile.mkdtemp("scripted")
scripted_path = os.path.join(scripted_dir, "model.pt")
scripted.save(scripted_path)
loaded = torch.jit.load(scripted_path)
outputs = loaded.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del scripted
del loaded
if to_device:
print(
"\n################ Test move from [{}] to [{}] #################".format(
device, to_device
)
)
engine = engine.to(to_device)
outputs = engine.forward(get_inputs_by_device(to_device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del engine
test_pt_run(device="cuda:0", trace=True, to_device="cuda:1", inp_on_cuda=True)
test_pt_run(device="cpu", trace=True, inp_on_cuda=False)
if __name__ == "__main__":
test_use_pt_vm_module()
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
import tvm
from tvm.contrib.torch import compile, TraceTvmModule, pytorch_tvm
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor, y: torch.Tensor):
return x * y
model = Model()
x = torch.rand([1, 2, 3])
y = torch.rand([1, 2, 3])
model_jit = torch.jit.script(model)
option = {
"input_infos": [("x", (1, 2, 3)), ("y", (1, 2, 3))],
"default_dtype": "float32",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 0, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "llvm",
"device": tvm.cpu(),
}
# use TraceTvmModule to convert List[Tensor] input/output
# to tuple of Tensors
pytorch_tvm_module = compile(model_jit, option)
scripted = torch.jit.script(pytorch_tvm_module)
traced = torch.jit.trace(TraceTvmModule(scripted), (x, y))
res_traced = traced.forward(x, y)
res_expected = pytorch_tvm_module.forward([x, y])[0]
tvm.testing.assert_allclose(res_traced, res_expected)