glenn-jocher committed
Commit c84dd27 · unverified · 1 Parent(s): c6b4f84

New val.py `cuda` variable (#6957)


* New val.py `cuda` variable

Fix for ONNX GPU val.

* Update val.py
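
In the old loop the host-to-device transfer was gated on the backend flags (`pt or jit or engine`), so when validating an ONNX model on a CUDA device the images and targets stayed on the CPU; the new `cuda` variable gates the transfer on the selected device instead, which is the "Fix for ONNX GPU val." the commit message refers to. A minimal sketch of that logic, assuming a standalone script rather than the full val.py loop (the dummy batch shape is illustrative, not taken from the commit):

import torch

# Resolve a device and derive the backend-agnostic flag added by this commit.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cuda = device.type != 'cpu'  # True for any CUDA device, regardless of backend

im = torch.zeros(1, 3, 640, 640, dtype=torch.uint8)  # dummy uint8 batch (assumed shape)
if cuda:  # previously `if pt or jit or engine:`, which skipped ONNX on GPU
    im = im.to(device, non_blocking=True)
im = im.float()  # uint8 to fp32, as in the following line of the loop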

Files changed (1)
  1. val.py +3 -2
val.py CHANGED
@@ -143,7 +143,7 @@ def run(data,
            batch_size = model.batch_size
        else:
            device = model.device
-           if not pt or jit:
+           if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

@@ -152,6 +152,7 @@ def run(data,

    # Configure
    model.eval()
+   cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95

@@ -177,7 +178,7 @@ def run(data,
    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        t1 = time_sync()
-       if pt or jit or engine:
+       if cuda:
            im = im.to(device, non_blocking=True)
            targets = targets.to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
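
A side note on the first hunk: in Python, `not pt or jit` parses as `(not pt) or jit`, so the parenthesized form `not (pt or jit)` is what actually expresses "neither a PyTorch nor a TorchScript model". A quick check with hypothetical flag values (not taken from the commit):

pt, jit = False, True   # hypothetical: a TorchScript-style backend
print(not pt or jit)    # True  -> parsed as (not pt) or jit
print(not (pt or jit))  # False -> True only when both flags are False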