import torch
import time
from nebullvm.api.functions import optimize_model  # installs the DL compilers on first use
from yolox.exp import get_exp

# Get YOLOX model
exp = get_exp(None, 'yolox-s')  # select model name
model = exp.get_model()

# Pick the device first so the script also runs on CPU-only machines
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# Create dummy data for the optimizer
input_data = [((torch.randn(1, 3, 640, 640).to(device),), 0) for _ in range(100)]

# ---------- Optimization ---------- 
optimized_model = optimize_model(model, input_data=input_data, optimization_time="constrained")  # Optimization without performance loss


# ---------- Benchmarks ---------- 
# Create a dummy image to test the latency of both models
img = torch.randn(1, 3, 640, 640).to(device)

# Check performance
warmup_iters = 30
num_iters = 100

# Unoptimized model performance
with torch.no_grad():
  for _ in range(warmup_iters):
    o = model(img)

  if device.type == "cuda":
    torch.cuda.synchronize()  # flush queued GPU work so the timer starts clean
  start = time.time()
  for _ in range(num_iters):
    o = model(img)
  if device.type == "cuda":
    torch.cuda.synchronize()  # wait for all timed inferences to finish
  stop = time.time()
print(f"Average inference time of unoptimized YOLOX: {(stop - start) / num_iters * 1000:.2f} ms")

# Optimized model performance
with torch.no_grad():
  for _ in range(warmup_iters):
    res = optimized_model(img)

  if device.type == "cuda":
    torch.cuda.synchronize()
  start = time.time()
  for _ in range(num_iters):
    res = optimized_model(img)
  if device.type == "cuda":
    torch.cuda.synchronize()
  stop = time.time()
print(f"Average inference time of YOLOX optimized with nebullvm: {(stop - start) / num_iters * 1000:.2f} ms")