Enable the RKNN2 internal multi-core tensor-parallel feature, reducing inference latency by ~1.5 s.
Browse files
multiprocess_inference.py
CHANGED
@@ -21,7 +21,7 @@ def vision_encoder_process(load_ready_queue, embedding_queue, img_path_queue, start_time):
 21         vision_encoder.load_rknn(VISION_ENCODER_PATH)
 22         end_time = time.time()
 23         print(f"Vision encoder loaded in {end_time - start_time:.2f} seconds")
-24         vision_encoder.init_runtime()
+24         vision_encoder.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
 25
 26         # 通知主进程加载完成
 27         load_ready_queue.put("vision_ready")