# hrnet_quantized_onnx_inference.py -- inference with a quantized HRNet ONNX model
import os
import argparse

import cv2
import numpy as np
import onnxruntime
import torch
import torch.nn.functional as F
from PIL import Image

from utils import preprocess

parser = argparse.ArgumentParser(description='HRNet')
parser.add_argument('-m', '--onnx-model', default='',
                    type=str, help='Path to the ONNX model.')
parser.add_argument('-idir', '--img-dir', default='',
                    type=str, help='Path to the image folder.')
parser.add_argument("--ipu", action="store_true", help="Use IPU for inference.")
parser.add_argument("--provider_config", type=str,
                    default="vaip_config.json", help="Path of the config file for setting provider_options.")
args = parser.parse_args()

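# Nominal network input resolution (height, width); used below to scale the
# preprocessing padding back out of the model output.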
INPUT_SIZE = [512, 1024]


def run_onnx_inference(ort_session, img):
    """Infer an image with an ONNX session.

    Args:
        ort_session: ONNX Runtime session.
        img (ndarray): Image to be inferred.

    Returns:
        ndarray: Model inference result.
    """
    pre_img, pad_h, pad_w = preprocess(img)
    img = np.expand_dims(pre_img, 0)
    img = np.transpose(img, (0, 2, 3, 1))
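    # Run the model; the first output holds the per-class segmentation scores.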
    ort_inputs = {ort_session.get_inputs()[0].name: img}
    o1 = ort_session.run(None, ort_inputs)[0]
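    # Crop away the padded border, scaled to the spatial size of the output.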
    h, w = o1.shape[-2:]
    h_cut = int(h / INPUT_SIZE[0] * pad_h)
    w_cut = int(w / INPUT_SIZE[1] * pad_w)
    o1 = o1[..., :h - h_cut, :w - w_cut]
    return o1


def vis(out, image, save_path='color_.png'):
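    # 19-class Cityscapes color palette (RGB triplets) used to colorize the
    # predicted class map.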
    palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
               220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
               0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
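    # The output is treated as NHWC here: convert to NCHW and, if its spatial
    # size differs from the original image, upsample it bilinearly to match.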
    out = out.transpose(0, 3, 1, 2)
    if out.shape[2] != image.shape[0] or out.shape[3] != image.shape[1]:
        out = torch.from_numpy(out).cpu()
        out = F.interpolate(
            out, size=image.shape[:2],
            mode='bilinear'
        ).numpy()
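    # Take the per-pixel argmax over classes, colorize it with the palette,
    # and write the result as an indexed PNG.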
    classMap_numpy = np.argmax(out[0], axis=0)
    classMap_numpy = Image.fromarray(classMap_numpy.astype(np.uint8))
    classMap_numpy_color = classMap_numpy.copy()
    classMap_numpy_color.putpalette(palette)
    classMap_numpy_color.save(save_path)


if __name__ == "__main__":
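    # Select execution providers: Vitis AI for IPU, otherwise CUDA with CPU fallback.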
    onnx_path = args.onnx_model
    if args.ipu:
        providers = ["VitisAIExecutionProvider"]
        provider_options = [{"config_file": args.provider_config}]
    else:
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
        provider_options = None
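    # Create the ONNX Runtime session with the chosen providers.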
    img_dir = args.img_dir
    ort_session = onnxruntime.InferenceSession(onnx_path, providers=providers, provider_options=provider_options)
    img_names = os.listdir(img_dir)
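    # Run inference on every readable image in the directory and save one
    # colorized prediction per input file.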
    for img_name in img_names:
        image_path = os.path.join(img_dir, img_name)
        img = cv2.imread(image_path)
        if img is None:
            # Skip files that OpenCV cannot decode as images.
            continue
        img_vis = np.copy(img)
        outs = run_onnx_inference(ort_session, img)
        vis(outs, img_vis, save_path='color_' + os.path.splitext(img_name)[0] + '.png')