import numpy as np
import cv2
from rknn.api import RKNN
import os
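# Helper: fetch torchvision's INT8-quantized ResNet-18, trace it with a dummy
# 1x3x224x224 input and save it as TorchScript for RKNN-Toolkit2 to consume.
# Note (assumption): newer torchvision releases deprecate `pretrained=` in
# favour of `weights=`, so this call may need adjusting on recent versions.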
def export_pytorch_model():
    import torch
    import torchvision.models as models
    net = models.quantization.resnet18(pretrained=True, quantize=True)
    net.eval()
    trace_model = torch.jit.trace(net, torch.Tensor(1, 3, 224, 224))
    trace_model.save('./resnet18_i8.pt')
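# Pretty-print the top-5 classes. './labels.txt' is expected to hold one class
# per line; only the text after the last ':' in each line is displayed.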
def show_outputs(output):
    index = sorted(range(len(output)), key=lambda k: output[k], reverse=True)
    with open('./labels.txt', 'r') as fp:
        labels = fp.readlines()
    top5_str = 'resnet18\n-----TOP 5-----\n'
    for i in range(5):
        value = output[index[i]]
        if value > 0:
            topi = '[{:>3d}] score:{:.6f} class:"{}"\n'.format(index[i], value, labels[index[i]].strip().split(':')[-1])
        else:
            topi = '[ -1]: 0.0\n'
        top5_str += topi
    print(top5_str.strip())
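# show_perfs() is a small formatting helper (presumably for perf-evaluation
# output); it is not called anywhere in the flow below.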
def show_perfs(perfs):
    perfs = 'perfs: {}\n'.format(perfs)
    print(perfs)


def softmax(x):
    return np.exp(x) / sum(np.exp(x))


def torch_version():
    import torch
    torch_ver = torch.__version__.split('.')
    torch_ver[2] = torch_ver[2].split('+')[0]
    return [int(v) for v in torch_ver]
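# torch_version() drops any local build suffix (e.g. '1.10.1+cu113' -> [1, 10, 1])
# so the version-list comparison against [1, 9, 0] below works as intended.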
if __name__ == '__main__':
    if torch_version() < [1, 9, 0]:
        import torch
        print("Your torch version is '{}'. To better support Quantization Aware Training (QAT) models,\n"
              "please upgrade torch to version '1.9.0' or higher!".format(torch.__version__))
        exit(0)
    model = './resnet18_i8.pt'
    if not os.path.exists(model):
        export_pytorch_model()
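    # Must match the shape the TorchScript model was traced with above
    # (NCHW: batch 1, 3 channels, 224x224).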
    input_size_list = [[1, 3, 224, 224]]
    # Create RKNN object
    rknn = RKNN(verbose=True)

    # Pre-process config
    print('--> Config model')
    rknn.config(mean_values=[123.675, 116.28, 103.53], std_values=[58.395, 58.395, 58.395], target_platform='rk3566')
    print('done')
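    # Note: the mean values are the usual ImageNet channel means scaled to
    # 0-255 (0.485/0.456/0.406 * 255); a single std of 58.395 is used for all
    # channels here. RKNN folds this normalization into the converted model,
    # so the raw uint8 image can be fed directly at inference time.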
    # Load model
    print('--> Loading model')
    ret = rknn.load_pytorch(model=model, input_size_list=input_size_list)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')
    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')
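    # do_quantization=False: the TorchScript model is already INT8-quantized
    # (see export_pytorch_model), so no dataset-based quantization pass is run
    # during the build step.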
    # Export rknn model
    print('--> Export rknn model')
    ret = rknn.export_rknn('./resnet_18.rknn')
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print('done')
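    # The exported ./resnet_18.rknn can later be reloaded with rknn.load_rknn()
    # or deployed to the target board (e.g. with RKNN Toolkit Lite2).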
    # Set inputs
    img = cv2.imread('./space_shuttle_224.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.expand_dims(img, 0)
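    # The test image is assumed to be 224x224 already; after BGR->RGB
    # conversion and adding a batch axis its shape is (1, 224, 224, 3), i.e.
    # NHWC, which matches the data_format passed to rknn.inference() below.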
    # Init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')
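    # With no arguments, init_runtime() runs inference on the host-side
    # simulator; to run on a connected RK3566 board one would typically pass
    # target='rk3566' (and a device_id if several boards are attached).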
    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img], data_format=['nhwc'])
    np.save('./pytorch_resnet18_qat_0.npy', outputs[0])
    show_outputs(softmax(np.array(outputs[0][0])))
    print('done')

    rknn.release()