text
stringlengths 1
2.05k
|
---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Static workspace backing the AOT stack allocator; alignment is required by
// the TVM CRT allocator (TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES).
static uint8_t g_aot_memory[WORKSPACE_SIZE]
    __attribute__((aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)));
// Allocator bookkeeping for the workspace above; initialized in TVMInitialize.
tvm_workspace_t app_workspace;
// Fatal-error handler: log the error code, then blink the on-board LED
// forever (two short blinks, then a longer pause) so the failure is visible.
void TVMPlatformAbort(tvm_crt_error_t error) {
  TVMLogf("TVMPlatformAbort: 0x%08x\n", error);
  // ON 250ms, OFF 250ms, ON 250ms, OFF 750ms — repeated indefinitely.
  static const unsigned long pattern_ms[4] = {250, 250, 250, 750};
  for (;;) {
    for (int step = 0; step < 4; step++) {
      digitalWrite(LED_BUILTIN, (step % 2 == 0) ? HIGH : LOW);
      delay(pattern_ms[step]);
    }
  }
}
// Logging is a no-op on this platform (no UART logging wired up here).
void TVMLogf(const char* msg, ...) {}

// Allocate num_bytes from the static AOT workspace. `dev` is unused.
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
  return StackMemoryManager_Allocate(&app_workspace, num_bytes, out_ptr);
}

// Return a workspace allocation. NOTE(review): the stack allocator presumably
// requires LIFO free order — confirm against StackMemoryManager docs.
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
  return StackMemoryManager_Free(&app_workspace, ptr);
}

// Benchmark timer state: start timestamp (micros()) and a running flag.
unsigned long g_utvm_start_time_micros;
int g_utvm_timer_running = 0;
// Start the benchmark timer; nested starts are rejected.
tvm_crt_error_t TVMPlatformTimerStart() {
  if (!g_utvm_timer_running) {
    g_utvm_timer_running = 1;
    g_utvm_start_time_micros = micros();
    return kTvmErrorNoError;
  }
  return kTvmErrorPlatformTimerBadState;
}
tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds) {
if (!g_utvm_timer_running) {
return kTvmErrorPlatformTimerBadState;
}
g_utvm_timer_running = 0;
unsig |
ned long g_utvm_stop_time = micros() - g_utvm_start_time_micros;
*elapsed_time_seconds = ((double)g_utvm_stop_time) / 1e6;
return kTvmErrorNoError;
}
// Fill `buffer` with num_bytes pseudo-random bytes from rand().
// Not cryptographically secure — used only for test-input generation.
tvm_crt_error_t TVMPlatformGenerateRandom(uint8_t* buffer, size_t num_bytes) {
  uint8_t* end = buffer + num_bytes;
  while (buffer != end) {
    *buffer++ = (uint8_t)rand();
  }
  return kTvmErrorNoError;
}
// One-time platform init: point the stack allocator at the static workspace.
void TVMInitialize() { StackMemoryManager_Init(&app_workspace, g_aot_memory, WORKSPACE_SIZE); }

// Run one inference through the AOT-generated entry point; abort on failure.
void TVMExecute(void* input_data, void* output_data) {
  int ret_val = tvmgen_default___tvm_main__(input_data, output_data);
  if (ret_val != 0) {
    TVMPlatformAbort(kTvmErrorPlatformCheckFailure);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Workspace size is substituted by the project generator ($... is a template
// variable, filled in when the project is materialized).
#define WORKSPACE_SIZE $workspace_size_bytes
#ifdef __cplusplus
extern "C" {
#endif
// One-time platform/memory initialization; must be called before TVMExecute.
void TVMInitialize();
/* TODO template this function signature with the input and output
 * data types and sizes. For example:
 *
 * void TVMExecute(uint8_t input_data[9216], uint8_t output_data[3]);
 *
 * Note this can only be done once MLF has JSON metadata describing
 * inputs and outputs.
 */
void TVMExecute(void* input_data, void* output_data);
#ifdef __cplusplus
}  // extern "C"
#endif
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Fatal-error handler: log the error code, then spin forever.
void TVMPlatformAbort(tvm_crt_error_t error) {
  TVMLogf("TVMPlatformAbort: 0x%08x\n", error);
  while (1) {
  }
}
// vsnprintf-backed message formatter used by the CRT logging layer.
// Returns the number of characters that would have been written (vsnprintf
// semantics), converted to size_t as in the original.
size_t TVMPlatformFormatMessage(char* out_buf, size_t out_buf_size_bytes, const char* fmt,
                                va_list args) {
  int written = vsnprintf(out_buf, out_buf_size_bytes, fmt, args);
  return (size_t)written;
}
// Heap-backed allocator for the host/FVP build. Zero-byte requests are
// promoted to sizeof(int) so a valid, freeable pointer is always produced.
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
  size_t bytes = (num_bytes == 0) ? sizeof(int) : num_bytes;
  *out_ptr = malloc(bytes);
  if (*out_ptr == NULL) {
    return kTvmErrorPlatformNoMemory;
  }
  return kTvmErrorNoError;
}
// Release heap memory from TVMPlatformMemoryAllocate. `dev` is unused.
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
  free(ptr);
  return kTvmErrorNoError;
}

// Benchmark timer state: start timestamp in microseconds plus a running flag.
unsigned long g_utvm_start_time_micros;
int g_utvm_timer_running = 0;
// Start the benchmark timer; fails if it is already running.
tvm_crt_error_t TVMPlatformTimerStart() {
  if (g_utvm_timer_running) {
    return kTvmErrorPlatformTimerBadState;  // nested starts are unsupported
  }
  g_utvm_start_time_micros = micros();
  g_utvm_timer_running = 1;
  return kTvmErrorNoError;
}
// Stop the benchmark timer and report elapsed time in seconds.
// Fails if the timer was not running. (Rejoins an expression that had been
// garbled across a line break in the source.)
tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds) {
  if (!g_utvm_timer_running) {
    return kTvmErrorPlatformTimerBadState;
  }
  g_utvm_timer_running = 0;
  unsigned long g_utvm_stop_time = micros() - g_utvm_start_time_micros;
  *elapsed_time_seconds = ((double)g_utvm_stop_time) / 1e6;
  return kTvmErrorNoError;
}
// Fill `buffer` with pseudo-random bytes from rand() (not crypto-grade).
tvm_crt_error_t TVMPlatformGenerateRandom(uint8_t* buffer, size_t num_bytes) {
  size_t i = 0;
  while (i < num_bytes) {
    buffer[i] = (uint8_t)(rand() & 0xFF);  // keep the low byte, as before
    ++i;
  }
  return kTvmErrorNoError;
}
import os
import pathlib
import re
import sys

from PIL import Image
import numpy as np
def create_header_file(name, tensor_name, tensor_data, output_path):
    """Write `<output_path>/<name>.h` declaring `tensor_name` as an int8 array.

    The header contains `<tensor_name>_len` (element count) and the tensor's
    raw bytes encoded as a `\\xNN` string literal placed in the `.data.tvm`
    section, 16-byte aligned.
    """
    header_path = (pathlib.Path(output_path) / name).with_suffix(".h").resolve()
    with open(header_path, "w") as out:
        out.write("\n")
        out.write(f"const size_t {tensor_name}_len = {tensor_data.size};\n")
        out.write(f'__attribute__((section(".data.tvm"), aligned(16))) int8_t {tensor_name}[] = "')
        raw = tensor_data.tobytes().hex()
        # Emit each byte as an escaped \xNN pair.
        out.write("".join(f"\\x{raw[i:i + 2]}" for i in range(0, len(raw), 2)))
        out.write('";\n\n')
def create_headers(image_name):
    """Generate C headers for the input and output tensors used by inference.

    Resizes the image to 224x224, quantizes it to int8 (zero point 128), and
    writes `inputs.h` plus a zeroed 2-element `outputs.h` under ./include.
    """
    image_file = os.path.join("./", f"{image_name}")
    resized = Image.open(image_file).resize((224, 224))
    pixels = np.expand_dims(np.asarray(resized).astype("float32"), axis=0)
    quantized = (pixels - 128).astype(np.int8)
    create_header_file("inputs", "input", quantized, "./include")
    create_header_file(
        "outputs",
        "output",
        np.zeros([2], np.int8),
        "./include",
    )


if __name__ == "__main__":
    create_headers(sys.argv[1])
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// CRT configuration header for the generated microTVM project.
#ifndef TVM_RUNTIME_CRT_CONFIG_H_
#define TVM_RUNTIME_CRT_CONFIG_H_
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
#endif  // TVM_RUNTIME_CRT_CONFIG_H_
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/stack_allocator.h>
#ifdef __cplusplus
extern "C" {
#endif
// Fatal-error handler for the FVP build: report the error, emit the
// "EXITTHESIM" token the simulator watches for, then terminate.
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
  printf("TVMPlatformAbort: %d\n", error_code);
  fputs("EXITTHESIM\n", stdout);
  exit(-1);
}
// Dynamic memory hooks are intentionally unimplemented on this platform;
// callers receive kTvmErrorFunctionCallNotImplemented.
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
  return kTvmErrorFunctionCallNotImplemented;
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
  return kTvmErrorFunctionCallNotImplemented;
}
// Forward printf-style log messages to stdout.
void TVMLogf(const char* msg, ...) {
  va_list args;
  va_start(args, msg);
  vprintf(msg, args);  // vprintf == vfprintf(stdout, ...)
  va_end(args);
}
// Stub: global function registration is unused in this standalone demo.
TVM_DLL int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) { return 0; }
#ifdef __cplusplus
}
#endif
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdio.h>
#include <tvm_runtime.h>
#include <tvmgen_detection.h>
#include "uart.h"
// Header files generated by convert_image.py
#include "inputs.h"
#include "outputs.h"
// Entry point for the person-detection demo on the FVP: run one inference
// over the baked-in image and print the result over UART.
int main(int argc, char** argv) {
  uart_init();  // bring up the UART first so printf output is visible
  printf("Starting Demo\n");
  printf("Running detection inference\n");
  // Wire the static buffers from inputs.h/outputs.h into the generated
  // model entry point's I/O structs.
  struct tvmgen_detection_outputs detection_outputs = {
      .MobilenetV1_Predictions_Reshape_1 = output,
  };
  struct tvmgen_detection_inputs detection_inputs = {
      .input = input,
  };
  tvmgen_detection_run(&detection_inputs, &detection_outputs);
  // Report result. NOTE(review): output[1] is treated as the "person" score
  // and output[0] as "no person" — confirm against the model's label order.
  if (output[1] > output[0]) {
    printf("Person detected.\n");
  } else {
    printf("No person detected.\n");
  }
  // The FVP will shut down when it receives "EXITTHESIM" on the UART
  printf("EXITTHESIM\n");
  while (1 == 1)
    ;
  return 0;
}
|
import os
import pathlib
import re
import sys

from PIL import Image
import numpy as np
def create_header_file(name, section, tensor_name, tensor_data, output_path):
    """Write `<output_path>/<name>.h` declaring `tensor_name` as an int8 array.

    Emits `<tensor_name>_len` (element count) and the tensor bytes as a
    `\\xNN`-escaped string literal placed in linker section `section`,
    16-byte aligned. (Fixes a string literal that had been garbled across a
    line break: the leading write is `"\\n"`.)
    """
    file_path = pathlib.Path(f"{output_path}/" + name).resolve()
    raw_path = file_path.with_suffix(".h").resolve()
    with open(raw_path, "w") as header_file:
        header_file.write(
            "\n"
            + f"const size_t {tensor_name}_len = {tensor_data.size};\n"
            + f'int8_t {tensor_name}[] __attribute__((section("{section}"), aligned(16))) = "'
        )
        data_hexstr = tensor_data.tobytes().hex()
        # Emit each byte as an escaped \xNN pair.
        for i in range(0, len(data_hexstr), 2):
            header_file.write(f"\\x{data_hexstr[i:i+2]}")
        header_file.write('";\n\n')
def create_headers(image_name):
    """Generate C headers for the input and output tensors used by inference.

    Resizes the image to 224x224, quantizes to int8 (zero point 128), and
    writes `inputs.h` (section ethosu_scratch) plus a zeroed 1001-element
    `outputs.h` (section output_data_sec) under ./include.
    """
    image_file = os.path.join("./", f"{image_name}")
    resized = Image.open(image_file).resize((224, 224))
    pixels = np.expand_dims(np.asarray(resized).astype("float32"), axis=0)
    quantized = (pixels - 128).astype(np.int8)
    create_header_file("inputs", "ethosu_scratch", "input", quantized, "./include")
    create_header_file(
        "outputs",
        "output_data_sec",
        "output",
        np.zeros([1001], np.int8),
        "./include",
    )


if __name__ == "__main__":
    create_headers(sys.argv[1])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import sys
def create_labels_header(labels_file, section, output_path):
    """Write `<output_path>/labels.h` with the class labels as a C string array.

    Each line of `labels_file` becomes one entry of `char* labels[]`, placed
    in linker section `section`, 16-byte aligned. (Drops a pointless
    `enumerate()` whose index was discarded.)
    """
    labels_path = pathlib.Path(labels_file).resolve()
    file_path = pathlib.Path(f"{output_path}/labels.h").resolve()
    with open(labels_path) as f:
        labels = f.readlines()
    with open(file_path, "w") as header_file:
        header_file.write(f'char* labels[] __attribute__((section("{section}"), aligned(16))) = {{')
        for label in labels:
            header_file.write(f'"{label.rstrip()}",')
        header_file.write("};\n")
# CLI entry point: argv[1] is the labels file; section/output dir are fixed.
if __name__ == "__main__":
    create_labels_header(sys.argv[1], "ethosu_scratch", "./include")
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* Please refer to http://www.freertos.org/a00110.html for configuration details. */
/******************************************************************************
* Defines
**********SYSTEM_CORE_CLOCK********************************************************************/
/* Hardware features */
/* Scheduling */
/* Stack and heap */
/* OS features */
/* Hooks */
/* Debug features */
if ((x) == 0) { \
taskDISABLE_INTERRUPTS(); \
for (;;) \
; \
}
/* Timers and queues */
/* Task settings */
/* Interrupt settings */
(configLIBRARY_LOWEST_INTERRUPT_PRIORITY << (8 - configPRIO_BITS))
(configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << (8 - configPRIO_BITS)) |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// CRT configuration header for the generated microTVM project.
#ifndef TVM_RUNTIME_CRT_CONFIG_H_
#define TVM_RUNTIME_CRT_CONFIG_H_
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
#endif  // TVM_RUNTIME_CRT_CONFIG_H_
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_APPS_MICROTVM_ETHOS_U_ETHOSU_55_H_
#define TVM_APPS_MICROTVM_ETHOS_U_ETHOSU_55_H_
/* Define Arm(R) Ethos(TM)-U55 specific IRQs & base address */
// Bit mask flagging an NPU failure (bit 4) — presumably a status-register
// bit; TODO confirm against the platform documentation.
#define ETHOSU_NPU_FAIL (1 << 4)
// NPU interrupt number and MMIO base address on this platform's memory map.
#define ETHOSU_IRQ ((IRQn_Type)56)
#define ETHOSU_BASE_ADDRESS ((void*)0x48102000)
#endif  // TVM_APPS_MICROTVM_ETHOS_U_ETHOSU_55_H_
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_APPS_MICROTVM_ETHOS_U_ETHOSU_MOD_H_
#define TVM_APPS_MICROTVM_ETHOS_U_ETHOSU_MOD_H_
#include <ARMCM55.h>
// TODO: Remove device specific information once RTOS support is available
#include <ethosu_driver.h>
#include <stdio.h>
#include "ethosu_55.h"
// Single driver instance for the Ethos(TM)-U NPU.
struct ethosu_driver ethosu0_driver;

// IRQ trampoline: forwards the NPU interrupt into the driver.
// (Fixes address-of operators that had been HTML-entity-mangled to `&amp;`.)
void ethosuIrqHandler0() { ethosu_irq_handler(&ethosu0_driver); }

// Initialize Arm(R) Ethos(TM)-U NPU driver and install its interrupt handler.
// Returns 0 on success, -1 on driver init failure.
int EthosuInit() {
  if (ethosu_init(&ethosu0_driver, (void*)ETHOSU_BASE_ADDRESS, NULL, 0, 1, 1)) {
    printf("Failed to initialize NPU.\n");
    return -1;
  }
  // Assumes SCB->VTOR points to RW memory
  NVIC_SetVector(ETHOSU_IRQ, (uint32_t)&ethosuIrqHandler0);
  NVIC_EnableIRQ(ETHOSU_IRQ);
  return 0;
}
#endif // TVM_APPS_MICROTVM_ETHOS_U_ETHOSU_MOD_H_
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOSU_ETHOSU_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_ETHOSU_ETHOSU_RUNTIME_H_
#include <ethosu_driver.h>
#include <stddef.h>
#include <stdint.h>
// Opaque device context handed to the Ethos(TM)-U device API hooks.
typedef void tvm_device_ethos_u_t;

// Execute a command stream (`cms_data`) on the NPU over the given tensor
// base addresses. Returns 0 on success, nonzero on failure.
int32_t TVMEthosULaunch(tvm_device_ethos_u_t* resource_handle, void* cms_data, size_t cms_data_size,
                        uint64_t* base_addrs, size_t* base_addrs_size, int num_tensors);

// Device lifecycle hooks required by the TVM device API.
int32_t TVMDeviceEthosUActivate(tvm_device_ethos_u_t* context);
int32_t TVMDeviceEthosUOpen(tvm_device_ethos_u_t* context);
int32_t TVMDeviceEthosUClose(tvm_device_ethos_u_t* context);
int32_t TVMDeviceEthosUDeactivate(tvm_device_ethos_u_t* context);
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/stack_allocator.h>
#ifdef __cplusplus
extern "C" {
#endif
// Fatal-error handler for the FVP build: print the error plus the simulator
// shutdown token in one call, then terminate.
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
  printf("TVMPlatformAbort: %d\nEXITTHESIM\n", error_code);
  exit(-1);
}
// Dynamic memory hooks are intentionally unimplemented on this platform;
// callers receive kTvmErrorFunctionCallNotImplemented.
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
  return kTvmErrorFunctionCallNotImplemented;
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
  return kTvmErrorFunctionCallNotImplemented;
}
// Route printf-style log messages to standard output.
void TVMLogf(const char* msg, ...) {
  va_list args;
  va_start(args, msg);
  vprintf(msg, args);  // identical to vfprintf(stdout, msg, args)
  va_end(args);
}
// Stub: global function registration is unused in this standalone demo.
TVM_DLL int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) { return 0; }
#ifdef __cplusplus
}
#endif
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdio.h>
#include <tvm_runtime.h>
#include "ethosu_mod.h"
#include "uart.h"
// Header files generated by convert_image.py and convert_labels.py
#include "inputs.h"
#include "labels.h"
#include "outputs.h"
// Local absolute-value helper for this freestanding build.
// NOTE(review): this shadows the C library's abs(); presumably provided
// because the toolchain's libc is not linked — confirm before removing.
int abs(int v) { return (v < 0) ? -v : v; }
// Entry point for the image-classification demo on the FVP: run one
// inference over the baked-in image and print the top-1 label over UART.
int main(int argc, char** argv) {
  uart_init();  // UART first so printf output is visible
  printf("Starting Demo\n");
  EthosuInit();  // bring up the NPU driver + IRQ before running inference
  printf("Running inference\n");
  // Hook the generated model entry point up to the static I/O buffers.
  struct tvmgen_default_outputs outputs = {
      .MobilenetV2_Predictions_Reshape_11 = output,
  };
  struct tvmgen_default_inputs inputs = {
      .tfl_quantize = input,
  };
  struct ethosu_driver* driver = ethosu_reserve_driver();
  struct tvmgen_default_devices devices = {
      .ethos_u = driver,
  };
  tvmgen_default_run(&inputs, &outputs, &devices);
  ethosu_release_driver(driver);
  // Calculate index of max value (argmax over the int8 class scores).
  int8_t max_value = -128;
  int32_t max_index = -1;
  for (unsigned int i = 0; i < output_len; ++i) {
    if (output[i] > max_value) {
      max_value = output[i];
      max_index = i;
    }
  }
  // NOTE(review): if output_len were 0, max_index stays -1 and the lookup
  // below would index out of bounds; generated output_len is presumed > 0.
  printf("The image has been classified as '%s'\n", labels[max_index]);
  // The FVP will shut down when it receives "EXITTHESIM" on the UART
  printf("EXITTHESIM\n");
  while (1 == 1)
    ;
  return 0;
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Forward declarations for the two FreeRTOS tasks defined below.
static void prvInferenceTask(void* pvParameters);
static void prvDataCollectionTask(void* pvParameters);
/* The queue used to pass data to run through our model */
static QueueHandle_t xQueue = NULL;
// Create the data queue and the two tasks, then hand control to the
// FreeRTOS scheduler.
int main(void) {
  uart_init();
  EthosuInit();
  // The queue carries a single pointer to the input buffer.
  xQueue = xQueueCreate(mainQUEUE_LENGTH, sizeof(uint8_t*));
  if (xQueue != NULL) {
    xTaskCreate(prvInferenceTask, "Inference", mainQUEUE_INFERENCE_TASK_STACK_SIZE, NULL,
                mainQUEUE_INFERENCE_TASK_PRIORITY, NULL);
    xTaskCreate(prvDataCollectionTask, "Data", mainQUEUE_DATA_TASK_STACK_SIZE, NULL,
                mainQUEUE_DATA_TASK_PRIORITY, NULL);
    vTaskStartScheduler();  // only returns if the scheduler failed to start
  }
  printf("Unreachable code reached!\n");
}
/*
* This task emulates collection of data and sending it to another inference task
* for processing
*/
static void prvDataCollectionTask(void* pvParameters) {
(void)pvParameters;
vTaskDelay(mainQUEUE_SEND_FREQUENCY_MS);
uint8_t** pucInputData = &input;
xQueueSend(xQueue, &pucInputData, 0U);
}
/*
 * This task emulates the inference of data sent by the collector task
 */
static void prvInferenceTask(void* pvParameters) {
  uint8_t* pucReceivedData;
  (void)pvParameters;
  // Block until the collector task posts an input buffer.
  // (Rejoins a call that had been garbled across a line break.)
  xQueueReceive(xQueue, &pucReceivedData, portMAX_DELAY);
  printf("Running inference\n");
  struct tvmgen_default_inputs xInputs = {
      .tfl_quantize = pucReceivedData,
  };
  struct tvmgen_default_outputs xOutputs = {
      .MobilenetV2_Predictions_Reshape_11 = output,
  };
  struct ethosu_driver* xDriver = ethosu_reserve_driver();
  struct tvmgen_default_devices xDevices = {
      .ethos_u = xDriver,
  };
  tvmgen_default_run(&xInputs, &xOutputs, &xDevices);
  ethosu_release_driver(xDriver);
  // Argmax over the int8 classification scores.
  int8_t ucMaxValue = -128;
  int32_t lMaxIndex = -1;
  for (unsigned int i = 0; i < output_len; ++i) {
    if (output[i] > ucMaxValue) {
      ucMaxValue = output[i];
      lMaxIndex = i;
    }
  }
  printf("The image has been classified as '%s'\n", labels[lMaxIndex]);
  printf("EXITTHESIM\n");
  // FreeRTOS tasks must never return; delete this task when finished
  // (requires INCLUDE_vTaskDelete in FreeRTOSConfig.h).
  vTaskDelete(NULL);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "tvm_ethosu_runtime.h"
#include <ethosu_driver.h>
// Run a command stream on the Ethos(TM)-U NPU.
// Returns 0 on success, -1 if the driver reports any failure.
int32_t TVMEthosULaunch(tvm_device_ethos_u_t* context, void* cms_data, size_t cms_data_size,
                        uint64_t* base_addrs, size_t* base_addrs_size, int num_tensors) {
  struct ethosu_driver* driver = (struct ethosu_driver*)context;
  int32_t result =
      ethosu_invoke(driver, cms_data, cms_data_size, base_addrs, base_addrs_size, num_tensors);
  // Map errors in invoke to TVM errors: any nonzero driver status -> -1.
  return (result != 0) ? -1 : 0;
}
// Device-API lifecycle hooks: no per-call setup/teardown is needed for the
// NPU in this app, so all four are no-ops that report success.
int32_t TVMDeviceEthosUActivate(tvm_device_ethos_u_t* context) { return 0; }
int32_t TVMDeviceEthosUOpen(tvm_device_ethos_u_t* context) { return 0; }
int32_t TVMDeviceEthosUClose(tvm_device_ethos_u_t* context) { return 0; }
int32_t TVMDeviceEthosUDeactivate(tvm_device_ethos_u_t* context) { return 0; }
|
import argparse
import copy
import json
import logging
import os
import pathlib
import re
import shlex
import shutil
import subprocess
import sys
_LOG = logging.getLogger(__name__)

# Directory containing this script, with symlinks resolved.
THIS_DIR = pathlib.Path(os.path.realpath(os.path.dirname(__file__)))

# Vagrant providers supported by the reference-VM tooling.
ALL_PROVIDERS = (
    "parallels",
    "virtualbox",
    "vmware_desktop",
)

# microTVM platforms with reference-VM support.
ALL_PLATFORMS = (
    "arduino",
    "zephyr",
)

# Extra setup scripts copied into the base-box build context.
EXTRA_SCRIPTS = [
    "apps/microtvm/reference-vm/base-box/base_box_setup_common.sh",
    "docker/install/ubuntu_install_core.sh",
    "docker/install/ubuntu_install_python.sh",
    "docker/utils/apt-install-and-clear.sh",
    "docker/install/ubuntu1804_install_llvm.sh",
    "docker/install/ubuntu_init_zephyr_project.sh",
    "docker/install/ubuntu_install_zephyr_sdk.sh",
    "docker/install/ubuntu_install_cmsis.sh",
    "docker/install/ubuntu_install_nrfjprog.sh",
]

PACKER_FILE_NAME = "packer.json"

# Board inventories shipped with the zephyr/arduino template projects.
with open(THIS_DIR / ".." / "zephyr" / "template_project" / "boards.json") as f:
    zephyr_boards = json.load(f)
with open(THIS_DIR / ".." / "arduino" / "template_project" / "boards.json") as f:
    arduino_boards = json.load(f)

# Platform name -> iterable of supported board names.
ALL_MICROTVM_BOARDS = {
    "arduino": arduino_boards.keys(),
    "zephyr": zephyr_boards.keys(),
}
def parse_virtualbox_devices():
    """Parse `VBoxManage list usbhost` output into a list of device dicts.

    Records are blank-line separated "Key: value" groups; a group is kept only
    if it has both VendorId and ProductId (the trailing group, if any, is kept
    unconditionally, matching the original behavior).
    """
    output = subprocess.check_output(["VBoxManage", "list", "usbhost"], encoding="utf-8")
    devices = []
    current_dev = {}
    for line in output.split("\n"):
        if line.strip():
            key, value = line.split(":", 1)
            current_dev[key] = value.lstrip(" ")
            continue
        # Blank line ends the current record.
        if current_dev:
            if "VendorId" in current_dev and "ProductId" in current_dev:
                devices.append(current_dev)
            current_dev = {}
    if current_dev:
        devices.append(current_dev)
    return devices
# Matches consecutive USBAttachVendorIdN=0x..../USBAttachProductIdN=0x.... lines
# from `VBoxManage showvminfo --machinereadable`.
VIRTUALBOX_USB_DEVICE_RE = (
    "USBAttachVendorId[0-9]+=0x([0-9a-z]{4})\n" + "USBAttachProductId[0-9]+=0x([0-9a-z]{4})"
)


def parse_virtualbox_attached_usb_devices(vm_uuid):
    """Return [(vid_hex, pid_hex), ...] for USB devices attached to `vm_uuid`.

    BUG FIX: the original called `pattern.findall(output, re.MULTILINE)`, but
    a compiled pattern's findall takes (string, pos, endpos) — re.MULTILINE
    (== 8) was silently interpreted as a start position, skipping the first
    8 characters of the output. The flag is unnecessary for this pattern.
    """
    output = subprocess.check_output(
        ["VBoxManage", "showvminfo", "--machinereadable", vm_uuid], encoding="utf-8"
    )
    return re.findall(VIRTUALBOX_USB_DEVICE_RE, output)
# Extracts the 4-digit hex id from VBoxManage's "0xNNNN ..." VendorId/ProductId.
VIRTUALBOX_VID_PID_RE = re.compile(r"0x([0-9A-Fa-f]{4}).*")


def attach_virtualbox(vm_uuid, vid_hex=None, pid_hex=None, serial=None):
    """Attach the host USB device matching vid/pid (and optional serial) to a VirtualBox VM.

    Creates a 'hold' usbfilter rule and attaches the device; skips if already
    attached. Raises Exception if no matching host device is found.
    """
    usb_devices = parse_virtualbox_devices()
    for dev in usb_devices:
        m = VIRTUALBOX_VID_PID_RE.match(dev["VendorId"])
        if not m:
            _LOG.warning("Malformed VendorId: %s", dev["VendorId"])
            continue
        dev_vid_hex = m.group(1).lower()
        m = VIRTUALBOX_VID_PID_RE.match(dev["ProductId"])
        if not m:
            _LOG.warning("Malformed ProductId: %s", dev["ProductId"])
            continue
        dev_pid_hex = m.group(1).lower()
        if (
            vid_hex == dev_vid_hex
            and pid_hex == dev_pid_hex
            and (serial is None or serial == dev["SerialNumber"])
        ):
            # FIX: reuse the single query result instead of invoking VBoxManage
            # a second time for the same information.
            attached_devices = parse_virtualbox_attached_usb_devices(vm_uuid)
            for vid, pid in attached_devices:
                if vid_hex == vid and pid_hex == pid:
                    print(f"USB dev {vid_hex}:{pid_hex} already attached. Skipping attach.")
                    return
            rule_args = [
                "VBoxManage",
                "usbfilter",
                "add",
                "0",
                "--action",
                "hold",
                "--name",
                "test device",
                "--target",
                vm_uuid,
                "--vendorid",
                vid_hex,
                "--productid",
                pid_hex,
            ]
            if serial is not None:
                rule_args.extend(["--serialnumber", serial])
            subprocess.check_call(rule_args)
            subprocess.check_call(["VBoxManage", "controlvm", vm_uuid, "usbattach", dev["UUID"]])
            return
    raise Exception(
        f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}"
    )
def attach_parallels(uuid, vid_hex=None, pid_hex=None, serial=None):
    """Attach a host USB device (matched by VID/PID and optional serial) to a Parallels VM.

    Raises Exception when no listed USB device matches.
    """
    listing = subprocess.check_output(["prlsrvctl", "usb", "list", "-j"], encoding="utf-8")
    usb_devices = json.loads(listing)
    for dev in usb_devices:
        # "System name" is a |-separated tuple; fields 2/3/6 are vid, pid, serial.
        _, dev_vid_hex, dev_pid_hex, _, _, dev_serial = dev["System name"].split("|")
        if vid_hex != dev_vid_hex.lower():
            continue
        if pid_hex != dev_pid_hex.lower():
            continue
        if serial is not None and serial != dev_serial:
            continue
        dev_name = dev["Name"]
        subprocess.check_call(["prlsrvctl", "usb", "set", dev_name, uuid])
        if "Used-By-Vm-Name" in dev:
            # Detach from whichever VM currently holds the device.
            subprocess.check_call(
                ["prlctl", "set", dev["Used-By-Vm-Name"], "--device-disconnect", dev_name]
            )
        subprocess.check_call(["prlctl", "set", uuid, "--device-connect", dev_name])
        return
    raise Exception(
        f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}"
    )
def attach_vmware(uuid, vid_hex=None, pid_hex=None, serial=None):
    """Prompt the user to manually attach a USB device to a VMWare VM.

    VMWare exposes no CLI for attaching USB devices, so this prints the device
    identifiers and blocks on input() until the user confirms.
    """
    print("NOTE: vmware doesn't seem to support automatic attaching of devices :(")
    # Fixed: this print was missing its f-prefix, so "{uuid}" was printed literally.
    print(f"The VMWare VM UUID is {uuid}")
    print("Please attach the following usb device using the VMWare GUI:")
    if vid_hex is not None:
        print(f" - VID: {vid_hex}")
    if pid_hex is not None:
        print(f" - PID: {pid_hex}")
    if serial is not None:
        print(f" - Serial: {serial}")
    if vid_hex is None and pid_hex is None and serial is None:
        print(" - (no specifications given for USB device)")
    print()
    print("Press [Enter] when the USB device is attached")
    input()
# Dispatch table: Vagrant provider name -> function that attaches a USB device
# to a VM under that provider.
ATTACH_USB_DEVICE = {
    "parallels": attach_parallels,
    "virtualbox": attach_virtualbox,
    "vmware_desktop": attach_vmware,
}
def generate_packer_config(file_path, providers):
    """Write the packer JSON config that builds the base box for each provider.

    Parameters
    ----------
    file_path : str
        Path of the packer config file to write.
    providers : list of str
        Vagrant provider names to generate builders for.
    """
    builders = []
    provisioners = []
    for provider_name in providers:
        builders.append(
            {
                "name": f"{provider_name}",
                "type": "vagrant",
                "box_name": f"microtvm-base-{provider_name}",
                "output_dir": f"output-packer-{provider_name}",
                "communicator": "ssh",
                "source_path": "generic/ubuntu1804",
                "provider": provider_name,
                "template": "Vagrantfile.packer-template",
            }
        )
    repo_root = subprocess.check_output(
        ["git", "rev-parse", "--show-toplevel"], encoding="utf-8"
    ).strip()
    scripts_to_copy = EXTRA_SCRIPTS
    for script in scripts_to_copy:
        script_path = os.path.join(repo_root, script)
        filename = os.path.basename(script_path)
        # Fixed: copy each extra script to the VM home dir under its own basename;
        # `filename` was computed but a corrupted literal was used as destination.
        provisioners.append({"type": "file", "source": script_path, "destination": f"~/{filename}"})
    provisioners.append(
        {
            "type": "shell",
            "script": "base_box_setup.sh",
        }
    )
    provisioners.append(
        {
            "type": "shell",
            "script": "base_box_provision.sh",
        }
    )
    with open(file_path, "w") as f:
        json.dump(
            {
                "builders": builders,
                "provisioners": provisioners,
            },
            f,
            sort_keys=True,
            indent=2,
        )
def build_command(args):
    """Handle the `build` subcommand: generate the packer config, then build base box(es)."""
    base_box_dir = THIS_DIR / "base-box"
    generate_packer_config(
        os.path.join(base_box_dir, PACKER_FILE_NAME),
        args.provider or ALL_PROVIDERS,
    )
    # Run packer with logging always enabled; the log lands next to the config.
    env = copy.copy(os.environ)
    packer_args = ["packer", "build", "-force"]
    env["PACKER_LOG"] = "1"
    env["PACKER_LOG_PATH"] = "packer.log"
    if args.debug_packer:
        packer_args += ["-debug"]
    packer_args += [PACKER_FILE_NAME]
    # Refuse to clobber existing output-packer-* dirs unless --force was given.
    box_package_exists = False
    if not args.force:
        box_package_dirs = [(base_box_dir / f"output-packer-{p}") for p in args.provider]
        for box_package_dir in box_package_dirs:
            if box_package_dir.exists():
                print(f"A box package {box_package_dir} already exists. Refusing to overwrite it!")
                box_package_exists = True
    if box_package_exists:
        sys.exit("One or more box packages exist (see list above). To rebuild use '--force'")
    subprocess.check_call(packer_args, cwd=THIS_DIR / "base-box", env=env)
# Keys every per-board test config in boards.json must define, with their expected types.
REQUIRED_TEST_CONFIG_KEYS = {
    "vid_hex": str,
    "pid_hex": str,
}
# Matches the `config.vm.box = "..."` line of a Vagrantfile (captures lhs and box name).
VM_BOX_RE = re.compile(r'(.*\.vm\.box) = "(.*)"')
# Matches the `... tvm_home = "..."` line of a Vagrantfile.
VM_TVM_HOME_RE = re.compile(r'(.*tvm_home) = "(.*)"')
# Subtrees not copied into the release-test working directory.
SKIP_COPY_PATHS = [".vagrant", "base-box", "scripts"]
def do_build_release_test_vm(
    release_test_dir, user_box_dir: pathlib.Path, base_box_dir: pathlib.Path, provider_name
):
    """Create and boot a release-test Vagrant VM from the freshly-built base box.

    Returns True on success, False when the Vagrantfile's box line can't be found.
    """
    # Destroy and remove any previous release-test working directory.
    if os.path.exists(release_test_dir):
        try:
            subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir)
        except subprocess.CalledProcessError:
            _LOG.warning("vagrant destroy failed--removing dirtree anyhow", exc_info=True)
        shutil.rmtree(release_test_dir)
    # Copy the user box dir into release_test_dir, skipping SKIP_COPY_PATHS subtrees.
    for dirpath, _, filenames in os.walk(user_box_dir):
        rel_path = os.path.relpath(dirpath, user_box_dir)
        if any(
            rel_path == scp or rel_path.startswith(f"{scp}{os.path.sep}") for scp in SKIP_COPY_PATHS
        ):
            continue
        dest_dir = os.path.join(release_test_dir, rel_path)
        os.makedirs(dest_dir)
        for filename in filenames:
            shutil.copy2(os.path.join(dirpath, filename), os.path.join(dest_dir, filename))
    # Rewrite the copied Vagrantfile: drop any pinned box version, point tvm_home
    # at the repo root relative to the new location, and point the box at the
    # newly-built package.box.
    release_test_vagrantfile = os.path.join(release_test_dir, "Vagrantfile")
    with open(release_test_vagrantfile) as f:
        lines = list(f)
    found_box_line = False
    with open(release_test_vagrantfile, "w") as f:
        for line in lines:
            if "config.vm.box_version" in line:
                continue
            m = VM_BOX_RE.match(line)
            tvm_home_m = VM_TVM_HOME_RE.match(line)
            if tvm_home_m:
                f.write(f'{tvm_home_m.group(1)} = "../../../.."\n')
                continue
            if not m:
                f.write(line)
                continue
            box_package = os.path.join(
                base_box_dir, f"output-packer-{provider_name}", "package.box"
            )
            box_relpath = os.path.relpath(box_package, release_test_dir)
            f.write(f'{m.group(1)} = "{box_relpath}"\n')
            found_box_line = True
    if not found_box_line:
        _LOG.error(
            "testing provider %s: couldn't find config.box.vm = line in Vagrantfile; unable to test",
            provider_name,
        )
        return False
    # Remove any cached copy of the box so `vagrant up` uses the fresh package.
    # Exit code 1 (box not cached) is acceptable.
    remove_args = ["vagrant", "box", "remove", box_relpath]
    return_code = subprocess.call(remove_args, cwd=release_test_dir)
    assert return_code in (0, 1), f'{" ".join(remove_args)} returned exit code {return_code}'
    subprocess.check_call(["vagrant", "up", f"--provider={provider_name}"], cwd=release_test_dir)
    return True
def do_run_release_test(release_test_dir, provider_name, test_config, test_device_serial):
    """Attach the test board to the release-test VM and run base_box_test.sh inside it."""
    # The provider writes the machine UUID under .vagrant/ after `vagrant up`.
    with open(
        os.path.join(release_test_dir, ".vagrant", "machines", "default", provider_name, "id")
    ) as f:
        machine_uuid = f.read()
    # Attach the physical board when the test config identifies one.
    if test_config["vid_hex"] and test_config["pid_hex"]:
        ATTACH_USB_DEVICE[provider_name](
            machine_uuid,
            vid_hex=test_config["vid_hex"],
            pid_hex=test_config["pid_hex"],
            serial=test_device_serial,
        )
    tvm_home = os.path.realpath(THIS_DIR / ".." / ".." / "..")

    def _quote_cmd(cmd):
        # Shell-quote each argument and join into one command string.
        return " ".join(shlex.quote(a) for a in cmd)

    test_cmd = (
        _quote_cmd(["cd", tvm_home])
        + " && "
        + _quote_cmd(
            [
                f"apps/microtvm/reference-vm/base-box/base_box_test.sh",
                test_config["microtvm_board"],
            ]
        )
    )
    subprocess.check_call(["vagrant", "ssh", "-c", f"bash -ec '{test_cmd}'"], cwd=release_test_dir)
def test_command(args):
    """Handle the `test` subcommand: build a release-test VM per provider and run board tests."""
    user_box_dir = THIS_DIR
    base_box_dir = user_box_dir / "base-box"
    boards_file = THIS_DIR / ".." / args.platform / "template_project" / "boards.json"
    with open(boards_file) as f:
        test_config = json.load(f)
    # Select this board's config and validate required keys and their types.
    microtvm_test_config = test_config[args.microtvm_board]
    for key, expected_type in REQUIRED_TEST_CONFIG_KEYS.items():
        assert key in microtvm_test_config and isinstance(
            microtvm_test_config[key], expected_type
        ), f"Expected key {key} of type {expected_type} in {boards_file}: {test_config!r}"
    microtvm_test_config["vid_hex"] = microtvm_test_config["vid_hex"].lower()
    microtvm_test_config["pid_hex"] = microtvm_test_config["pid_hex"].lower()
    microtvm_test_config["microtvm_board"] = args.microtvm_board
    providers = args.provider
    release_test_dir = THIS_DIR / f"release-test"
    # --skip-build/--skip-destroy reuse a single VM, so only one provider is allowed.
    if args.skip_build or args.skip_destroy:
        assert (
            len(providers) == 1
        ), "--skip-build and/or --skip-destroy was given, but >1 provider specified"
    test_failed = False
    for provider_name in providers:
        try:
            if not args.skip_build:
                do_build_release_test_vm(
                    release_test_dir, user_box_dir, base_box_dir, provider_name
                )
            do_run_release_test(
                release_test_dir,
                provider_name,
                microtvm_test_config,
                args.test_device_serial,
            )
        except subprocess.CalledProcessError:
            # Leave the VM running for post-mortem debugging; tell the user how to retry.
            test_failed = True
            sys.exit(
                f"\n\nERROR: Provider '{provider_name}' failed the release test. "
                "You can re-run it to reproduce the issue without building everything "
                "again by passing the --skip-build and specifying only the provider that failed. "
                "The VM is still running in case you want to connect it via SSH to "
                "investigate further the issue, thus it's necessary to destroy it manually "
                "to release the resources back to the host, like a USB device attached to the VM."
            )
        finally:
            # Destroy the VM unless a test failed or --skip-destroy was given.
            if not (args.skip_destroy or test_failed):
                subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir)
                shutil.rmtree(release_test_dir)
    print(f'\n\nThe release tests passed on all specified providers: {", ".join(providers)}.')
def release_command(args):
    """Handle the `release` subcommand: publish built box packages to Vagrant Cloud.

    Exits with an error message when --release-version is missing/empty.
    """
    if args.release_full_name:
        vm_name = args.release_full_name
    else:
        vm_name = "tlcpack/microtvm"
    # Fixed: validate --release-version *before* using it; previously the check
    # ran after `vagrant cloud version create` had already consumed the value.
    if not args.release_version:
        sys.exit(f"--release-version must be specified")
    if not args.skip_creating_release_version:
        subprocess.check_call(
            [
                "vagrant",
                "cloud",
                "version",
                "create",
                vm_name,
                args.release_version,
            ]
        )
    for provider_name in args.provider:
        subprocess.check_call(
            [
                "vagrant",
                "cloud",
                "publish",
                "-f",
                vm_name,
                args.release_version,
                provider_name,
                str(THIS_DIR / "base-box" / f"output-packer-{provider_name}/package.box"),
            ]
        )
def parse_args():
    """Build the CLI argument parser (build/test/release subcommands) and parse argv."""
    parser = argparse.ArgumentParser(
        description="Automates building, testing, and releasing a base box"
    )
    subparsers = parser.add_subparsers(help="Action to perform.")
    subparsers.required = True
    subparsers.dest = "action"
    # --provider may be repeated to act on several providers at once.
    parser.add_argument(
        "--provider",
        choices=ALL_PROVIDERS,
        action="append",
        required=True,
        help="Name of the provider or providers to act on",
    )
    platform_help_str = "Platform to use (e.g. Arduino, Zephyr)"
    # `build` subcommand.
    parser_build = subparsers.add_parser("build", help="Build a base box.")
    parser_build.set_defaults(func=build_command)
    parser_build.add_argument(
        "--debug-packer",
        action="store_true",
        help=("Run packer in debug mode, and write log to the base-box directory."),
    )
    parser_build.add_argument(
        "--force",
        action="store_true",
        help=("Force rebuilding a base box from scratch if one already exists."),
    )
    # `test` subcommand.
    parser_test = subparsers.add_parser("test", help="Test a base box before release.")
    parser_test.set_defaults(func=test_command)
    parser_test.add_argument(
        "--skip-build",
        action="store_true",
        help=(
            "If given, assume a box has already been built in the release-test subdirectory, "
            "so use that box to execute the release test script. If the tests fail the VM used "
            "for testing will be left running for further investigation and will need to be "
            "destroyed manually. If all tests pass on all specified providers no VM is left running, "
            "unless --skip-destroy is given too."
        ),
    )
    parser_test.add_argument(
        "--skip-destroy",
        action="store_true",
        help=(
            "Skip destroying the test VM even if all tests pass. Can only be used if a single "
            "provider is specified. Default is to destroy the VM if all tests pass (and always "
            "skip destroying it if a test fails)."
        ),
    )
    parser_test.add_argument(
        "--test-device-serial",
        help=(
            "If given, attach the test device with this USB serial number. Corresponds to the "
            "iSerial field from `lsusb -v` output."
        ),
    )
    # One platform-specific sub-subparser per supported platform, each carrying
    # its own --microtvm-board choices.
    parser_test_platform_subparsers = parser_test.add_subparsers(help=platform_help_str)
    for platform in ALL_PLATFORMS:
        platform_specific_parser = parser_test_platform_subparsers.add_parser(platform)
        platform_specific_parser.set_defaults(platform=platform)
        platform_specific_parser.add_argument(
            "--microtvm-board",
            choices=ALL_MICROTVM_BOARDS[platform],
            required=True,
            help="MicroTVM board used for testing.",
        )
    # `release` subcommand.
    parser_release = subparsers.add_parser("release", help="Release base box to cloud.")
    parser_release.set_defaults(func=release_command)
    parser_release.add_argument(
        "--release-version",
        required=True,
        help="Version to release, in the form 'x.y.z'. Must be specified with release.",
    )
    parser_release.add_argument(
        "--skip-creating-release-version",
        action="store_true",
        help="Skip creating the version and just upload for this provider.",
    )
    parser_release.add_argument(
        "--release-full-name",
        required=False,
        type=str,
        default=None,
        help=(
            "If set, it will use this as the full release name and version for the box. "
            "If this set, it will ignore `--release-version`."
        ),
    )
    args = parser.parse_args()
    return args
def main():
    """Script entry point: parse CLI arguments and dispatch to the chosen subcommand."""
    parsed = parse_args()
    parsed.func(parsed)


if __name__ == "__main__":
    main()
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/runtime/crt_config.h.template
* \brief Template for CRT configuration, to be modified on each target.
*/
/*! Log level of the CRT runtime */
/*! Maximum supported dimension in NDArray */
/*! Maximum supported arguments in generated functions */
/*! Size of the global function registry, in bytes. */
/*! Maximum number of registered modules. */
/*! Maximum packet size, in bytes, including the length header. */
/*! Maximum supported string length in dltype, e.g. "int8", "int16", "float32" */
/*! Maximum supported string length in function names */
/*! Maximum supported string length in parameter names */
/*! \brief Maximum length of a PackedFunc function name. */
/*! \brief Log2 of the page size (bytes) for a virtual memory page. */
/*! \brief Number of pages on device. */ |
import atexit |
import collections |
import collections.abc |
import enum |
import fcntl |
import json |
import logging |
import os |
import os.path |
import pathlib |
import queue |
import re |
import shlex |
import shutil |
import struct |
import subprocess |
import sys |
import tarfile |
import tempfile |
import threading
from typing |
import Union |
import usb |
import psutil |
import stat |
import serial |
import serial.tools.list_ports |
import yaml
from tvm.micro.project_api |
import server
_LOG = logging.getLogger(__name__)
# Directory containing this API server script; falls back to the current working
# directory when __file__ has no dirname (e.g. run as a bare filename).
# Fixed: os.path.getcwd() does not exist (AttributeError); use os.getcwd().
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
# CMake/ninja build output directory for the generated project.
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
# This directory is still the project template iff no model archive was copied in.
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
BOARDS = API_SERVER_DIR / "boards.json"
CMAKELIST_FILENAME = "CMakeLists.txt"
# Zephyr RTOS version this server is pinned to (warn/raise otherwise).
ZEPHYR_VERSION = 2.7
# Default `west` invocation; None when sys.executable is unavailable.
WEST_CMD = default = sys.executable + " -m west" if sys.executable else None
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
try:
    with open(BOARDS) as boards:
        BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
    raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
def check_call(cmd_args, *args, **kwargs):
    """Log the command (and cwd, when given), then run subprocess.check_call on it."""
    if "cwd" in kwargs:
        cwd_str = f" (in cwd: {kwargs['cwd']})"
    else:
        cwd_str = ""
    quoted = " ".join(shlex.quote(arg) for arg in cmd_args)
    _LOG.info("run%s: %s", cwd_str, quoted)
    return subprocess.check_call(cmd_args, *args, **kwargs)
# Parses one CMakeCache.txt entry of the form NAME:TYPE=VALUE.
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
# Maps CMake's spellings of boolean cache values onto Python bools.
CMAKE_BOOL_MAP = dict(
    [(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
    + [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
class CMakeCache(collections.abc.Mapping):
    """Lazily-parsed, read-only mapping over a CMakeCache.txt file.

    Construction is cheap and does not require the cache file to exist yet; the
    file is read on first access.
    """

    def __init__(self, path):
        self._path = path
        self._dict = None  # populated lazily by _entries()

    def _entries(self):
        # Parse the cache file on first use; cache the result.
        if self._dict is None:
            self._dict = self._read_cmake_cache()
        return self._dict

    def __iter__(self):
        # Fixed: __iter__/__len__ previously used self._dict directly, which is
        # None until the first __getitem__ call (TypeError on iteration).
        return iter(self._entries())

    def __getitem__(self, key):
        return self._entries()[key]

    def __len__(self):
        return len(self._entries())

    def _read_cmake_cache(self):
        """Read a CMakeCache.txt-like file and return a dictionary of values."""
        entries = collections.OrderedDict()
        with open(self._path, encoding="utf-8") as f:
            for line in f:
                m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
                if not m:
                    continue
                if m.group("type") == "BOOL":
                    # BOOL entries are normalized to Python bools.
                    value = CMAKE_BOOL_MAP[m.group("value").upper()]
                else:
                    value = m.group("value")
                entries[m.group("name")] = value
        return entries
# Global view of the current build's CMake cache (parsed lazily on first access).
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")


class BoardError(Exception):
    """Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""


class BoardAutodetectFailed(Exception):
    """Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
def _get_flash_runner():
    """Return the Zephyr flash runner name, from the CMake cache or runners.yaml."""
    runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
    if runner is None:
        # Not cached by CMake; fall back to the generated runners.yaml.
        with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
            runner = yaml.load(f, Loader=yaml.FullLoader)["flash-runner"]
    return runner
def _find_board_from_cmake_file(cmake_file: Union[str, pathlib.Path]) -> str:
"""Find Zephyr board from generated CMakeLists.txt"""
zephyr_board = None
with open(cmake_file) as cmake_f:
for line in cmake_f:
if line.startswith("set(BOARD"):
zephyr_board = line.strip("\n").strip("\r").strip(")").split(" ")[1]
break
if not zephyr_board:
raise RuntimeError(f"No Zephyr board set in the {cmake_file}.")
return zephyr_board
def _find_platform_from_cmake_file(cmake_file: Union[str, pathlib.Path]) -> str:
emu_platform = None
with open(API_SERVER_DIR / CMAKELIST_FILENAME) as cmake_f:
for line in cmake_f:
set_platform = re.match("set\(EMU_PLATFORM (.*)\)", line)
if set_platform:
emu_platform = set_platform.group(1)
break
return emu_platform
def _get_device_args(options):
    """Dispatch to the flash-runner-specific helper that builds device selection args."""
    runner = _get_flash_runner()
    if runner == "nrfjprog":
        return _get_nrf_device_args(options)
    if runner == "openocd":
        return _get_openocd_device_args(options)
    # Unsupported runner: report board + runner for diagnosis.
    board = _find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
    raise BoardError(
        f"Don't know how to find serial terminal for board {board} with flash "
        f"runner {runner}"
    )
def _get_board_mem_size_bytes(options):
    """Return the board's RAM size in bytes from its Zephyr board YAML, or None.

    Returns None (with a warning) when the board YAML is missing or malformed.
    """
    board_file_path = (
        pathlib.Path(get_zephyr_base(options))
        / "boards"
        / "arm"
        / options["board"]
        / (options["board"] + ".yaml")
    )
    try:
        with open(board_file_path) as f:
            board_data = yaml.load(f, Loader=yaml.FullLoader)
            # The YAML reports RAM in KiB.
            return int(board_data["ram"]) * 1024
    except Exception:
        # Fixed: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        _LOG.warning("Board memory information is not available.")
    return None
# Fallback TVM workspace heap size for boards with no recommendation.
DEFAULT_HEAP_SIZE_BYTES = 216 * 1024


def _get_recommended_heap_size_bytes(options):
    """Return the board's recommended heap size from boards.json, else the default."""
    board_properties = BOARD_PROPERTIES[options["board"]]
    return board_properties.get("recommended_heap_size_bytes", DEFAULT_HEAP_SIZE_BYTES)
def generic_find_serial_port(serial_number=None):
    """Find a USB serial port based on its serial number or its VID:PID.

    This method finds a USB serial port device path based on the port's serial number (if given) or
    based on the board's idVendor and idProduct ids.

    Parameters
    ----------
    serial_number : str
        The serial number associated to the USB serial port which the board is attached to. This is
        the same number as shown by 'lsusb -v' in the iSerial field.

    Returns
    -------
    Path to the USB serial port device, for example /dev/ttyACM1.
    """
    if serial_number:
        regex = serial_number
    else:
        prop = BOARD_PROPERTIES[_find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)]
        regex = ":".join([prop["vid_hex"], prop["pid_hex"]])
    serial_ports = list(serial.tools.list_ports.grep(regex))
    if len(serial_ports) == 0:
        # Fixed: the old message read prop["board"], but `prop` is unbound when a
        # serial number was given, crashing with UnboundLocalError here.
        raise Exception(f"No serial port found matching {regex}!")
    if len(serial_ports) != 1:
        ports_lst = ""
        for port in serial_ports:
            ports_lst += f"Serial port: {port.device}, serial number: {port.serial_number}\n"
        # Fixed: message was missing its f-prefix, so {ports_lst} printed literally.
        raise Exception(f"Expected 1 serial port, found multiple ports:\n{ports_lst}")
    return serial_ports[0].device
def _get_openocd_device_args(options):
    """Build openocd `--serial` args from the `openocd_serial` project option."""
    return ["--serial", generic_find_serial_port(options.get("openocd_serial"))]
def _get_nrf_device_args(options):
    """Build nrfjprog `--snr` args, auto-detecting the attached board when possible.

    Raises BoardAutodetectFailed when `nrfjprog --ids` lists nothing, and
    BoardError when several boards are attached but nrfjprog_snr does not
    select one of them.
    """
    nrfjprog_args = ["nrfjprog", "--ids"]
    nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
    if not nrfjprog_ids.strip("\n"):
        raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')
    # Output is one serial number per line with a trailing newline.
    boards = nrfjprog_ids.split("\n")[:-1]
    if len(boards) > 1:
        if options["nrfjprog_snr"] is None:
            raise BoardError(
                "Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
            )
        if str(options["nrfjprog_snr"]) not in boards:
            raise BoardError(
                f"nrfjprog_snr ({options['nrfjprog_snr']}) not found in {nrfjprog_args}: {boards}"
            )
        return ["--snr", options["nrfjprog_snr"]]
    if not boards:
        return []
    # Exactly one board attached: use it.
    return ["--snr", boards[0]]
# When this directory is still the template, each subdirectory of src/ is a
# selectable project type (e.g. host_driven, aot_standalone_demo).
PROJECT_TYPES = []
if IS_TEMPLATE:
    for d in (API_SERVER_DIR / "src").iterdir():
        if d.is_dir():
            PROJECT_TYPES.append(d.name)
# Project options advertised to Project API clients. NOTE(review): the `help`
# strings of nrfjprog_snr and openocd_serial appear truncated in this revision
# (likely lost text after a '#') — confirm the full wording against upstream.
PROJECT_OPTIONS = server.default_project_options(
    project_type={"choices": tuple(PROJECT_TYPES)},
    board={"choices": list(BOARD_PROPERTIES)},
    verbose={"optional": ["generate_project"]},
) + [
    server.ProjectOption(
        "gdbserver_port",
        optional=["open_transport"],
        type="int",
        default=None,
        help=("If given, port number to use when running the local gdbserver."),
    ),
    server.ProjectOption(
        "nrfjprog_snr",
        optional=["open_transport"],
        type="int",
        default=None,
        help=("When used with nRF targets, serial
    ),
    server.ProjectOption(
        "openocd_serial",
        optional=["open_transport"],
        type="int",
        default=None,
        help=("When used with OpenOCD targets, serial
    ),
    server.ProjectOption(
        "west_cmd",
        optional=["generate_project"],
        type="str",
        default=WEST_CMD,
        help=(
            "Path to the west tool. If given, supersedes both the zephyr_base "
            "option and ZEPHYR_BASE environment variable."
        ),
    ),
    server.ProjectOption(
        "zephyr_base",
        required=(["generate_project", "open_transport"] if not ZEPHYR_BASE else None),
        optional=(["generate_project", "open_transport"] if ZEPHYR_BASE else ["build"]),
        type="str",
        default=ZEPHYR_BASE,
        help="Path to the zephyr base directory.",
    ),
    server.ProjectOption(
        "config_main_stack_size",
        optional=["generate_project"],
        type="int",
        default=None,
        help="Sets CONFIG_MAIN_STACK_SIZE for Zephyr board.",
    ),
    server.ProjectOption(
        "arm_fvp_path",
        optional=["generate_project", "open_transport"],
        type="str",
        default=None,
        help="Path to the FVP binary to invoke.",
    ),
    server.ProjectOption(
        "use_fvp",
        optional=["generate_project"],
        type="bool",
        default=False,
        help="Run on the FVP emulator instead of hardware.",
    ),
    server.ProjectOption(
        "heap_size_bytes",
        optional=["generate_project"],
        type="int",
        default=None,
        help="Sets the value for HEAP_SIZE_BYTES passed to K_HEAP_DEFINE() to service TVM memory allocation requests.",
    ),
]
def get_zephyr_base(options: dict):
    """Returns Zephyr base path"""
    base = options.get("zephyr_base", ZEPHYR_BASE)
    assert base, "'zephyr_base' option not passed and not found by default!"
    return base
def get_cmsis_path(options: dict) -> pathlib.Path:
    """Returns CMSIS dependency path"""
    path = options.get("cmsis_path")
    assert path, "'cmsis_path' option not passed!"
    return pathlib.Path(path)
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
CRT_COPY_ITEMS = ("include", "Makefile", "src")
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
zephyr_board = options["board"]
with open(project_dir / "prj.conf", "w") as f:
f.write(
"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("
if options["project_type"] == "host_driven":
f.write(
"CONFIG_TIMING_FUNCTIONS=y\n"
"
"CONFIG_CPLUSPLUS=y\n"
"CONFIG_LIB_CPLUSPLUS=y\n"
"\n"
)
f.write("
if self._has_fpu(zephyr_board):
f.write("
if options.get("config_main_stack_size") is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={options['config_main_stack_size']}\n")
f.write("
f.write("\n
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if zephyr_board in board_l |
ist:
f.write(f"{line}\n")
if zephyr_board not in ["qemu_riscv64"]:
f.write("
f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CMAKE_ARGS_TOKEN = "<CMAKE_ARGS>"
QEMU_PIPE_TOKEN = "<QEMU_PIPE>"
CMSIS_PATH_TOKEN = "<CMSIS_PATH>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common aot_executor_module aot_executor common",
"aot_standalone_demo": "memory microtvm_rpc_common common",
}
def _get_platform_version(self, zephyr_base: str) -> float:
with open(pathlib.Path(zephyr_base) / "VERSION", "r") as f:
lines = f.readlines()
for line in lines:
line = line.replace(" ", "").replace("\n", "").replace("\r", "")
if "VERSION_MAJOR" in line:
version_major = line.split("=")[1]
if "VERSION_MINOR" in line:
version_minor = line.split("=")[1]
return float(f"{version_major}.{version_minor}")
def _cmsis_required(self, project_path: Union[str, pathlib.Path]) -> bool:
"""Check if CMSIS dependency is required."""
project_path = pathlib.Path(project_path)
for path in (project_path / "codegen" / "host" / "src").iterdir():
if path.is_file():
with open(path, "r") as lib_f:
lib_content = lib_f.read()
if any(
header in lib_content
for header in [
"<arm_nnsupportfunctions.h>",
"arm_nn_types.h",
"arm_nnfunctions.h",
]
):
return True
return False
def _generate_cmake_args(self, mlf_extracted_path, options) -> str:
cmake_args = "\n
if options.get("verbose"):
cmake_args += "set(CMAKE_VERBOSE_MAKEFILE TRUE)\n"
if options.get("zephyr_base"): |
cmake_args += f"set(ZEPHYR_BASE {options['zephyr_base']})\n"
if options.get("west_cmd"):
cmake_args += f"set(WEST {options['west_cmd']})\n"
if self._is_qemu(options["board"], options.get("use_fvp")):
cmake_args += f"set(EMU_PLATFORM qemu)\n"
if self._is_fvp(options["board"], options.get("use_fvp")):
cmake_args += "set(EMU_PLATFORM armfvp)\n"
cmake_args += "set(ARMFVP_FLAGS -I)\n"
cmake_args += f"set(BOARD {options['board']})\n"
enable_cmsis = self._cmsis_required(mlf_extracted_path)
if enable_cmsis:
assert os.environ.get("CMSIS_PATH"), "CMSIS_PATH is not defined."
cmake_args += f"set(ENABLE_CMSIS {str(enable_cmsis).upper()})\n"
return cmake_args
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
zephyr_board = options["board"]
version = self._get_platform_version(get_zephyr_base(options))
if version != ZEPHYR_VERSION:
message = f"Zephyr version found is not supported: found {version}, expected {ZEPHYR_VERSION}."
if options.get("warning_as_error") is not None and options["warning_as_error"]:
raise server.ServerError(message=message)
_LOG.warning(message)
project_dir = pathlib.Path(project_dir)
project_dir.mkdir()
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
shutil.copy2(BOARDS, project_dir / BOARDS.name)
board_overlay_path = API_SERVER_DIR / "app-overlay" / f"{zephyr_board}.overlay"
if board_overlay_path.exists():
shutil.copy2(board_overlay_path, project_dir / f"{zephyr_board}.overlay")
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
extract_pat |
h = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(zephyr_board, options.get("use_fvp")):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
elif self._is_fvp(zephyr_board, options.get("use_fvp")):
shutil.copytree(API_SERVER_DIR / "fvp-hack", project_dir / "fvp-hack")
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
with open(project_dir / CMAKELIST_FILENAME, "w") as cmake_f:
with open(API_SERVER_DIR / f"{CMAKELIST_FILENAME}.template", "r") as cmake_template_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
if self.CMAKE_ARGS_TOKEN in line:
line = self._generate_cmake_args(extract_path, options)
if self.QEMU_PIPE_TOKEN in line:
self.qemu_pipe_dir = pathlib.Path(tempfile.mkdtemp())
line = line.replace(self.QEMU_PIPE_TOKEN, str(self.qemu_pipe_dir / "fifo"))
if self.CMSIS_PATH_TOKEN in line and self._cmsis_required(extract_path):
line = line.replace(self.CMSIS_PATH_TOKEN, str(os.environ["CMSIS_PATH"]))
cmake_f.write(line)
heap_size = _get_recommended_heap_size_bytes(options)
if options. |
get("heap_size_bytes"):
board_mem_size = _get_board_mem_size_bytes(options)
heap_size = options["heap_size_bytes"]
if board_mem_size is not None:
assert (
heap_size < board_mem_size
), f"Heap size {heap_size} is larger than memory size {board_mem_size} on this board."
cmake_f.write(
f"target_compile_definitions(app PUBLIC -DHEAP_SIZE_BYTES={heap_size})\n"
)
if options.get("compile_definitions"):
flags = options.get("compile_definitions")
for item in flags:
cmake_f.write(f"target_compile_definitions(app PUBLIC {item})\n")
if self._is_fvp(zephyr_board, options.get("use_fvp")):
cmake_f.write(f"target_compile_definitions(app PUBLIC -DFVP=1)\n")
self._create_prj_conf(project_dir, options)
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
src_dir = project_dir / "src"
if options["project_type"] != "host_driven" or self._is_fvp(
zephyr_board, options.get("use_fvp")
):
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
else:
src_dir.mkdir()
shutil.copy2(API_SERVER_DIR / "src" / options["project_type"] / "main.c", src_dir)
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
if BUILD_DIR.exists():
shutil.rmtree(BUILD_DIR)
BUILD_DIR.mkdir()
zephyr_board = _find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
emu_platform = _find_platfo |
rm_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
env = os.environ
if self._is_fvp(zephyr_board, emu_platform == "armfvp"):
env["ARMFVP_BIN_PATH"] = str((API_SERVER_DIR / "fvp-hack").resolve())
st = os.stat(env["ARMFVP_BIN_PATH"] + "/FVP_Corstone_SSE-300_Ethos-U55")
os.chmod(
env["ARMFVP_BIN_PATH"] + "/FVP_Corstone_SSE-300_Ethos-U55",
st.st_mode | stat.S_IEXEC,
)
check_call(["cmake", "-GNinja", ".."], cwd=BUILD_DIR, env=env)
args = ["ninja"]
if options.get("verbose"):
args.append("-v")
check_call(args, cwd=BUILD_DIR, env=env)
_KNOWN_QEMU_ZEPHYR_BOARDS = ["mps2_an521", "mps3_an547"]
_KNOWN_FVP_ZEPHYR_BOARDS = ["mps3_an547"]
@classmethod
def _is_fvp(cls, board, use_fvp):
if use_fvp:
assert (
board in cls._KNOWN_FVP_ZEPHYR_BOARDS
), "FVP can't be used to emulate this board on Zephyr"
return True
return False
@classmethod
def _is_qemu(cls, board, use_fvp=False):
return "qemu" in board or (
board in cls._KNOWN_QEMU_ZEPHYR_BOARDS and not cls._is_fvp(board, use_fvp)
)
@classmethod
def _has_fpu(cls, zephyr_board):
fpu_boards = [name for name, board in BOARD_PROPERTIES.items() if board["fpu"]]
return zephyr_board in fpu_boards
def flash(self, options):
if _find_platform_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME):
return
zephyr_board = _find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["ninja |
", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
    """Create and open the transport matching the configured board.

    Returns the transport's TransportTimeouts and registers an atexit hook
    so the transport is torn down even on abnormal server exit.
    """
    cmake_file = API_SERVER_DIR / CMAKELIST_FILENAME
    zephyr_board = _find_board_from_cmake_file(cmake_file)
    emu_platform = _find_platform_from_cmake_file(cmake_file)

    if self._is_fvp(zephyr_board, emu_platform == "armfvp"):
        transport_cls = ZephyrFvpTransport
    elif self._is_qemu(zephyr_board):
        transport_cls = ZephyrQemuTransport
    else:
        transport_cls = ZephyrSerialTransport

    transport = transport_cls(options)
    timeouts = transport.open()
    self._transport = transport
    atexit.register(lambda: self.close_transport())
    return timeouts
def close_transport(self):
    """Close and drop the active transport; safe to call repeatedly."""
    if self._transport is None:
        return
    self._transport.close()
    self._transport = None
def read_transport(self, n, timeout_sec):
    """Read up to ``n`` bytes from the open transport.

    Raises server.TransportClosedError if no transport is open.
    """
    transport = self._transport
    if transport is None:
        raise server.TransportClosedError()
    return transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
    """Write ``data`` to the open transport.

    Raises server.TransportClosedError if no transport is open.
    """
    transport = self._transport
    if transport is None:
        raise server.TransportClosedError()
    return transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
assert (new_flag & os.O_NONBLOCK) != 0, "Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
    """Transport that talks to the device over its UART console serial port."""

    # Nordic Semiconductor USB vendor ID (nRF5340-DK on-board interface MCU).
    NRF5340_VENDOR_ID = 0x1366

    # Maps the interface MCU's USB product ID to the VCOM port that carries
    # the UART console.
    NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID = {0x1055: "VCOM2", 0x1051: "VCOM1"}

    @classmethod
    def _lookup_baud_rate(cls, options):
        """Read the console UART baud rate from the generated Zephyr devicetree."""
        # Temporarily extend sys.path so Zephyr's bundled dtlib can be imported.
        sys.path.insert(
            0,
            os.path.join(
                get_zephyr_base(options), "scripts", "dts", "python-devicetree", "src", "devicetree"
            ),
        )
        try:
            import dtlib  # pylint: disable=import-outside-toplevel
        finally:
            sys.path.pop(0)

        dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
        uart_baud = (
            dt_inst.get_node("/chosen")
            .props["zephyr,console"]
            .to_path()
            .props["current-speed"]
            .to_num()
        )
        _LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
        return uart_baud

    @classmethod
    def _find_nrf_serial_port(cls, options):
        """Locate the nRF5340-DK console serial port via nrfjprog and pyusb.

        Raises Exception when no NRF5340-DK (or an unknown product ID) is found.
        """
        com_ports = subprocess.check_output(
            ["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
        )
        # NOTE(review): assumes `nrfjprog --com` emits "<serial> <port> <vcom>"
        # columns per line — confirm against the nrfjprog documentation.
        ports_by_vcom = {}
        for line in com_ports.split("\n")[:-1]:
            parts = line.split()
            ports_by_vcom[parts[2]] = parts[1]

        nrf_board = usb.core.find(idVendor=cls.NRF5340_VENDOR_ID)
        # Idiom fix: compare to None with `is`, not `==`.
        if nrf_board is None:
            raise Exception("_find_nrf_serial_port: unable to find NRF5340DK")

        if nrf_board.idProduct in cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID:
            vcom_port = cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID[nrf_board.idProduct]
        else:
            raise Exception("_find_nrf_serial_port: unable to find known NRF5340DK product ID")

        return ports_by_vcom[vcom_port]

    @classmethod
    def _find_openocd_serial_port(cls, options):
        """Find the serial port when flashing with openocd."""
        serial_number = options.get("openocd_serial")
        return generic_find_serial_port(serial_number)

    @classmethod
    def _find_jlink_serial_port(cls, options):
        """Find the serial port when flashing with J-Link."""
        return generic_find_serial_port()

    @classmethod
    def _find_stm32cubeprogrammer_serial_port(cls, options):
        """Find the serial port when flashing with STM32CubeProgrammer."""
        return generic_find_serial_port()

    @classmethod
    def _find_serial_port(cls, options):
        """Dispatch to the port-discovery routine for the configured flash runner."""
        flash_runner = _get_flash_runner()

        if flash_runner == "nrfjprog":
            return cls._find_nrf_serial_port(options)

        if flash_runner == "openocd":
            return cls._find_openocd_serial_port(options)

        if flash_runner == "jlink":
            return cls._find_jlink_serial_port(options)

        if flash_runner == "stm32cubeprogrammer":
            return cls._find_stm32cubeprogrammer_serial_port(options)

        raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")

    def __init__(self, options):
        self._options = options
        self._port = None  # pyserial handle; set in open()

    def open(self):
        """Open the serial port at the devicetree-configured baud rate."""
        port_path = self._find_serial_port(self._options)
        self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
        return server.TransportTimeouts(
            session_start_retry_timeout_sec=2.0,
            session_start_timeout_sec=5.0,
            session_established_timeout_sec=5.0,
        )

    def close(self):
        """Close the serial port and drop the handle."""
        self._port.close()
        self._port = None

    def read(self, n, timeout_sec):
        """Read up to ``n`` bytes; raise IoTimeoutError if nothing arrives."""
        self._port.timeout = timeout_sec
        to_return = self._port.read(n)
        if not to_return:
            raise server.IoTimeoutError()

        return to_return

    def write(self, data, timeout_sec):
        """Write all of ``data``, retrying partial writes until done."""
        self._port.write_timeout = timeout_sec
        bytes_written = 0
        while bytes_written < len(data):
            n = self._port.write(data)
            data = data[n:]
            bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
    # Events posted by the QEMU stdout-watcher thread during startup.
    QEMU_STARTED = "qemu_started"  # QEMU printed its "[QEMU] CPU" banner
    MAKE_FAILED = "make_failed"  # build/run recipe reported a failure
    EOF = "eof"  # QEMU's stdout closed without the banner appearing
class ZephyrQemuTransport:
    """The user-facing Zephyr QEMU transport class."""

    def __init__(self, options):
        self.options = options
        self.proc = None  # `ninja run` subprocess hosting QEMU; set in open()
        self.pipe_dir = None  # directory containing the fifo.in/fifo.out pipes
        self.read_fd = None  # fd for fifo.out (device -> host)
        self.write_fd = None  # fd for fifo.in (host -> device)
        self._queue = queue.Queue()  # startup events from the stdout watcher

    def open(self):
        # Discover the pipe path QEMU was configured with at CMake time.
        # NOTE(review): if CMakeCache.txt has no "QEMU_PIPE:" line, self.pipe is
        # never assigned and the access below raises AttributeError — confirm
        # that is the intended failure mode.
        with open(BUILD_DIR / "CMakeCache.txt", "r") as cmake_cache_f:
            for line in cmake_cache_f:
                if "QEMU_PIPE:" in line:
                    self.pipe = pathlib.Path(line[line.find("=") + 1 :])
                    break
        self.pipe_dir = self.pipe.parents[0]
        self.write_pipe = self.pipe_dir / "fifo.in"
        self.read_pipe = self.pipe_dir / "fifo.out"
        # The fifos must exist before QEMU starts and attaches to them.
        os.mkfifo(self.write_pipe)
        os.mkfifo(self.read_pipe)

        # Only build a modified environment when a gdbserver port is requested.
        env = None
        if self.options.get("gdbserver_port"):
            env = os.environ.copy()
            env["TVM_QEMU_GDBSERVER_PORT"] = self.options["gdbserver_port"]

        self.proc = subprocess.Popen(
            ["ninja", "run"],
            cwd=BUILD_DIR,
            env=env,
            stdout=subprocess.PIPE,
        )
        self._wait_for_qemu()

        # Open both fifos O_RDWR (presumably so open() cannot block waiting for
        # the peer end, even though each pipe is used one-way — confirm).
        self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
        self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
        _set_nonblock(self.read_fd)
        _set_nonblock(self.write_fd)

        return server.TransportTimeouts(
            session_start_retry_timeout_sec=2.0,
            session_start_timeout_sec=10.0,
            session_established_timeout_sec=10.0,
        )

    def close(self):
        did_write = False
        if self.write_fd is not None:
            try:
                # b"\x01x" is presumably QEMU's Ctrl-A x quit sequence, asking
                # QEMU to exit cleanly — confirm against QEMU monitor docs.
                server.write_with_timeout(
                    self.write_fd, b"\x01x", 1.0
                )
                did_write = True
            except server.IoTimeoutError:
                pass
            os.close(self.write_fd)
            self.write_fd = None

        if self.proc:
            # If the quit sequence could not be sent, fall back to SIGTERM,
            # then SIGKILL if the process ignores it for 5 seconds.
            if not did_write:
                self.proc.terminate()
            try:
                self.proc.wait(5.0)
            except subprocess.TimeoutExpired:
                self.proc.kill()

        if self.read_fd:
            os.close(self.read_fd)
            self.read_fd = None

        if self.pipe_dir is not None:
            shutil.rmtree(self.pipe_dir)
            self.pipe_dir = None

    def read(self, n, timeout_sec):
        # Delegate to the server helper, which handles the non-blocking fd.
        return server.read_with_timeout(self.read_fd, n, timeout_sec)

    def write(self, data, timeout_sec):
        # Escape the 0x01 control byte by doubling it, then write everything,
        # looping over partial writes.
        to_write = bytearray()
        escape_pos = []
        for i, b in enumerate(data):
            if b == 0x01:
                to_write.append(b)
                escape_pos.append(i)
            to_write.append(b)
        while to_write:
            num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
            to_write = to_write[num_written:]

    def _qemu_check_stdout(self):
        # Runs on a daemon thread: scan QEMU's stdout for the startup banner or
        # a make failure, and post the outcome to self._queue.
        for line in self.proc.stdout:
            # stdout is bytes (no text mode on the Popen); str() yields the
            # bytes repr, which still contains the text being matched.
            line = str(line)
            _LOG.info("%s", line)
            if "[QEMU] CPU" in line:
                self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
            else:
                line = re.sub("[^a-zA-Z0-9 \n]", "", line)
                pattern = r"recipe for target (\w*) failed"
                if re.search(pattern, line, re.IGNORECASE):
                    self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
        # stdout closed: QEMU exited (or never started).
        self._queue.put(ZephyrQemuMakeResult.EOF)

    def _wait_for_qemu(self):
        # Block (up to 120 s) until the watcher thread reports QEMU started;
        # any other event is a startup failure.
        threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
        while True:
            try:
                item = self._queue.get(timeout=120)
            except Exception:
                raise TimeoutError("QEMU setup timeout.")

            if item == ZephyrQemuMakeResult.QEMU_STARTED:
                break

            if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
                raise RuntimeError("QEMU setup failed.")

            raise ValueError(f"{item} not expected.")
class ZephyrFvpMakeResult(enum.Enum):
    # Events observed while launching the ARM FVP emulator.
    FVP_STARTED = "fvp_started"  # FVP process came up
    MICROTVM_API_SERVER_INIT = "fvp_initialized"  # firmware reported init
    MAKE_FAILED = "make_failed"  # build/run recipe failed
    EOF = "eof"  # output ended before initialization completed
class BlockingStream:
    """Reimplementation of Stream class from Iris with blocking semantics."""

    def __init__(self):
        self.q = queue.Queue()  # chunks of bytes, in arrival order
        self.unread = None  # leftover bytes from a previous oversized chunk

    def read(self, n=-1, timeout_sec=None):
        assert (
            n != -1
        ), "expect firmware to open stdin using raw mode, and therefore expect sized read requests"

        # Start with any bytes left over from the previous call.
        buffered = self.unread if self.unread else b""
        self.unread = None

        # Block only for the first chunk; once we have something, drain the
        # queue opportunistically without blocking further.
        while len(buffered) < n:
            try:
                buffered += self.q.get(block=len(buffered) == 0, timeout=timeout_sec)
            except queue.Empty:
                break

        # Stash anything beyond the requested size for the next read.
        if len(buffered) > n:
            self.unread = buffered[n:]
            buffered = buffered[:n]

        return buffered

    readline = read

    def write(self, data):
        self.q.put(data)
class ZephyrFvpTransport:
"""A transport class that communicates with the ARM FVP via Iris server."""
def __init__(self, options):
    """Store project options and load the Iris client library for the FVP."""
    self.options = options
    self.proc = None  # FVP subprocess handle; None until launched
    self._queue = queue.Queue()  # startup events from the FVP watcher
    self._import_iris()
def _import_iris(self):
assert "arm_fvp_path" in self.options, "arm_fvp_path is not defined."
iris_lib_path = (
pathlib.Path(self.options["arm_fvp_path"]).parent.parent.parent
/ "Iris"
/ "Python"
/ "iris"
)
sys.path.insert(0, str(iris_lib_path.parent))
try: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.