File size: 1,484 Bytes
a1d409e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import os
import time

import numpy as np
import onnxruntime as ort


# TensorRT EP configuration via environment variables (these must be set
# before the InferenceSession is created): enable INT8 precision, use ORT's
# own calibration table rather than TensorRT's native format, and cache the
# built engine on disk so later runs skip the slow engine-build step.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
# Disable ORT graph optimizations so the TensorRT EP sees the raw graph and
# can claim whole subgraphs instead of pre-fused ORT nodes.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
# CUDA EP is the fallback for any node TensorRT cannot handle.
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# Fixed-shape dummy inputs for a BERT-style model: (batch, sequence) int64.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

# Build the feed dict ONCE. The original called sess.get_inputs() three
# times per iteration inside the timed loop, adding Python overhead to the
# very latency being measured.
input_names = [inp.name for inp in sess.get_inputs()]
feeds = dict(zip(input_names, (input_ids, attention_mask, token_type_ids)))

print("Warm up phase...")
# The first run triggers TensorRT engine build/load; keep it out of timing.
sess.run(None, feeds, run_options=run_opt)

print("Start inference...")
max_iters = 2000
predict = None  # holds the outputs (list of arrays) of the most recent run
# time.perf_counter() is the monotonic high-resolution clock recommended for
# benchmarking; time.time() can jump with wall-clock adjustments.
start_time = time.perf_counter()
for _ in range(max_iters):  # "_": the original name `iter` shadowed a builtin
    predict = sess.run(None, feeds, run_options=run_opt)
print("Average Inference Time = {:.3f} ms".format((time.perf_counter() - start_time) * 1000 / max_iters))