glenn-jocher committed
Commit 5948f20 (unverified)
1 Parent(s): 8b5086c

Update test.py profiling (#3555)


* Update test.py profiling

* half_precision to half

* inplace

Files changed (3)
  1. test.py +17 -13
  2. train.py +13 -13
  3. utils/plots.py +9 -8
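
The core change splits the evaluation loop's timing into three accumulators: pre-processing (t0), inference (t1), and NMS (t2), each later divided by the number of images seen to report per-image speeds. Below is a minimal, self-contained sketch of that accumulation pattern; `timer()` and the dummy stages stand in for YOLOv5's `time_synchronized()`, the model forward pass, and `non_max_suppression()`, and the sleep durations are purely illustrative.

import time

def timer():
    # Stand-in for utils.torch_utils.time_synchronized(), which additionally
    # calls torch.cuda.synchronize() before reading the clock on CUDA devices.
    return time.time()

def preprocess(x):   # dummy stage: stands in for .to(device), .half()/.float(), /= 255.0
    time.sleep(0.002)
    return x

def infer(x):        # dummy stage: stands in for model(img, augment=augment)
    time.sleep(0.010)
    return x

def nms(x):          # dummy stage: stands in for non_max_suppression(...)
    time.sleep(0.003)
    return x

t0 = t1 = t2 = 0.0   # accumulated pre-process, inference and NMS time (seconds)
seen = 0             # number of images timed

for img in range(8):             # 8 illustrative "batches" of batch size 1
    t_ = timer()
    img = preprocess(img)
    t = timer()
    t0 += t - t_                 # pre-process bucket

    out = infer(img)
    t1 += timer() - t            # inference bucket

    t = timer()
    out = nms(out)
    t2 += timer() - t            # NMS bucket
    seen += 1

t = tuple(x / seen * 1E3 for x in (t0, t1, t2))  # ms per image, as in test.py
print('Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image' % t)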
test.py CHANGED
@@ -38,7 +38,7 @@ def test(data,
          plots=True,
          wandb_logger=None,
          compute_loss=None,
-         half_precision=True,
+         half=True,
          opt=None):
     # Initialize/load model and set device
     training = model is not None
@@ -63,7 +63,7 @@ def test(data,
         # model = nn.DataParallel(model)
 
     # Half
-    half = device.type != 'cpu' and half_precision  # half precision only supported on CUDA
+    half &= device.type != 'cpu'  # half precision only supported on CUDA
     if half:
         model.half()
 
@@ -95,20 +95,22 @@ def test(data,
     names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
     coco91class = coco80_to_coco91_class()
     s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
-    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
+    p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
     for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
+        t_ = time_synchronized()
         img = img.to(device, non_blocking=True)
         img = img.half() if half else img.float()  # uint8 to fp16/32
         img /= 255.0  # 0 - 255 to 0.0 - 1.0
         targets = targets.to(device)
         nb, _, height, width = img.shape  # batch size, channels, height, width
+        t = time_synchronized()
+        t0 += t - t_
 
         # Run model
-        t = time_synchronized()
         out, train_out = model(img, augment=augment)  # inference and training outputs
-        t0 += time_synchronized() - t
+        t1 += time_synchronized() - t
 
         # Compute loss
         if compute_loss:
@@ -119,7 +121,7 @@ def test(data,
         lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
         t = time_synchronized()
         out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
-        t1 += time_synchronized() - t
+        t2 += time_synchronized() - t
 
         # Statistics per image
         for si, pred in enumerate(out):
@@ -236,9 +238,10 @@ def test(data,
             print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
 
     # Print speeds
-    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
+    t = tuple(x / seen * 1E3 for x in (t0, t1, t2))  # speeds per image
     if not training:
-        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
+        shape = (batch_size, 3, imgsz, imgsz)
+        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
 
     # Plots
     if plots:
@@ -327,24 +330,25 @@ if __name__ == '__main__':
              save_txt=opt.save_txt | opt.save_hybrid,
              save_hybrid=opt.save_hybrid,
              save_conf=opt.save_conf,
-             half_precision=opt.half,
+             half=opt.half,
              opt=opt
              )
 
     elif opt.task == 'speed':  # speed benchmarks
-        for w in opt.weights:
-            test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt)
+        for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
+            test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, half=True,
+                 opt=opt)
 
     elif opt.task == 'study':  # run over a range of settings and save/plot
         # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
         x = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
-        for w in opt.weights:
+        for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
            f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to
            y = []  # y axis
            for i in x:  # img-size
                print(f'\nRunning {f} point {i}...')
                r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
-                              plots=False, opt=opt)
+                              plots=False, half=True, opt=opt)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
         os.system('zip -r study.zip study_*.txt')
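
The reported speeds are now per-image averages: each accumulated total in seconds is divided by the number of images seen and scaled to milliseconds. A quick worked example of that arithmetic with purely illustrative totals (not measurements):

# Worked example of the per-image speed math in test.py (illustrative totals only).
seen = 5000                  # images evaluated
t0, t1, t2 = 2.5, 49.0, 6.0  # total pre-process, inference, NMS time in seconds

t = tuple(x / seen * 1E3 for x in (t0, t1, t2))  # -> (0.5, 9.8, 1.2) ms per image
shape = (32, 3, 640, 640)    # (batch_size, 3, imgsz, imgsz), illustrative
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)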
train.py CHANGED
@@ -74,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None):
         loggers['wandb'] = wandb_logger.wandb
         data_dict = wandb_logger.data_dict
         if wandb_logger.wandb:
-            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming
+            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # may update weights, epochs if resuming
 
     nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
     names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
@@ -354,18 +354,18 @@ def train(hyp, opt, device, tb_writer=None):
             final_epoch = epoch + 1 == epochs
             if not opt.notest or final_epoch:  # Calculate mAP
                 wandb_logger.current_epoch = epoch + 1
-                results, maps, times = test.test(data_dict,
-                                                 batch_size=batch_size * 2,
-                                                 imgsz=imgsz_test,
-                                                 model=ema.ema,
-                                                 single_cls=single_cls,
-                                                 dataloader=testloader,
-                                                 save_dir=save_dir,
-                                                 save_json=is_coco and final_epoch,
-                                                 verbose=nc < 50 and final_epoch,
-                                                 plots=plots and final_epoch,
-                                                 wandb_logger=wandb_logger,
-                                                 compute_loss=compute_loss)
+                results, maps, _ = test.test(data_dict,
+                                             batch_size=batch_size * 2,
+                                             imgsz=imgsz_test,
+                                             model=ema.ema,
+                                             single_cls=single_cls,
+                                             dataloader=testloader,
+                                             save_dir=save_dir,
+                                             save_json=is_coco and final_epoch,
+                                             verbose=nc < 50 and final_epoch,
+                                             plots=plots and final_epoch,
+                                             wandb_logger=wandb_logger,
+                                             compute_loss=compute_loss)
 
                 # Write
                 with open(results_file, 'a') as f:
utils/plots.py CHANGED
@@ -3,7 +3,6 @@
 import glob
 import math
 import os
-import random
 from copy import copy
 from pathlib import Path
 
@@ -252,21 +251,23 @@ def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
 
 def plot_study_txt(path='', x=None):  # from utils.plots import *; plot_study_txt()
     # Plot study.txt generated by test.py
-    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
-    # ax = ax.ravel()
+    plot2 = False  # plot additional results
+    if plot2:
+        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
 
     fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
     # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
     for f in sorted(Path(path).glob('study*.txt')):
         y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
         x = np.arange(y.shape[1]) if x is None else np.array(x)
-        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
-        # for i in range(7):
-        #     ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
-        #     ax[i].set_title(s[i])
+        if plot2:
+            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
+            for i in range(7):
+                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+                ax[i].set_title(s[i])
 
         j = y[3].argmax() + 1
-        ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
+        ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
                  label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
 
     ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
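
With the extra timing bucket, each study_*.txt row now ends with three per-image times (pre-process, inference, NMS) after the four metrics and the three validation loss components, which is why plot_study_txt() reads usecols=[0, 1, 2, 3, 7, 8, 9] and indexes y[5] for inference time. A minimal sketch of that column layout using synthetic values (the numbers and the study_demo.txt filename are illustrative, not real results):

import numpy as np

# Column layout of each study_*.txt row, as written by test.py's --task study branch:
#   0:P  1:R  2:mAP@.5  3:mAP@.5:.95  4-6:val losses  7:t_preprocess  8:t_inference  9:t_NMS  (times in ms/img)
rows = np.array([[0.65, 0.55, 0.52, 0.33, 0.04, 0.06, 0.01, 0.4, 5.1, 1.0],
                 [0.68, 0.58, 0.56, 0.37, 0.04, 0.06, 0.01, 0.6, 8.3, 1.1]])  # synthetic values
np.savetxt('study_demo.txt', rows, fmt='%10.4g')  # same fmt string as test.py uses

y = np.loadtxt('study_demo.txt', dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
j = y[3].argmax()  # index of the best mAP@.5:.95
print(f'best mAP@.5:.95 = {y[3, j]:.3f} at {y[5, j]:.1f} ms/img inference')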