Add `--cache val` (#6663)
The new `--cache val` argument caches only the validation set into RAM. This should help multi-GPU training speed without consuming as much RAM as a full `--cache ram`. A short sketch of the resulting behaviour follows the diff below.
train.py (changed):

```diff
@@ -221,8 +221,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
 
     # Trainloader
     train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
-                                              hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=LOCAL_RANK,
-                                              workers=workers, image_weights=opt.image_weights, quad=opt.quad,
+                                              hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache,
+                                              rect=opt.rect, rank=LOCAL_RANK, workers=workers,
+                                              image_weights=opt.image_weights, quad=opt.quad,
                                               prefix=colorstr('train: '), shuffle=True)
     mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
     nb = len(train_loader)  # number of batches
@@ -231,8 +232,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
     # Process 0
     if RANK in [-1, 0]:
         val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
-                                       hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
-                                       workers=workers * 2, pad=0.5,
+                                       hyp=hyp, cache=None if noval else opt.cache,
+                                       rect=True, rank=-1, workers=workers * 2, pad=0.5,
                                        prefix=colorstr('val: '))[0]
 
     if not resume:
```
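For quick reference, here is a minimal, self-contained sketch of the resulting behaviour, e.g. when running `python train.py --cache val`. The `resolve_cache` helper and the printed table are illustrative only and do not exist in train.py:

```python
# Illustrative sketch (not part of train.py): shows which cache mode each
# dataloader receives for a given --cache value, mirroring the diff above.
def resolve_cache(opt_cache, loader, noval=False):
    """Return the cache value handed to create_dataloader for 'train' or 'val'."""
    if loader == 'train':
        # --cache val disables caching for the training set only
        return None if opt_cache == 'val' else opt_cache
    # The val loader keeps its previous behaviour; a truthy 'val' value means
    # the validation images are cached into RAM (per the PR description).
    return None if noval else opt_cache


for value in (None, 'ram', 'disk', 'val'):
    print(f"--cache {value}: train={resolve_cache(value, 'train')}, val={resolve_cache(value, 'val')}")
```

With `--cache val`, only the validation images end up cached (`train=None, val=val`), while `--cache ram` and `--cache disk` still cache both the training and validation sets as before.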