DDP after autoanchor reorder (#2421)
train.py
CHANGED
@@ -181,10 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
         logger.info('Using SyncBatchNorm()')
 
-    # DDP mode
-    if cuda and rank != -1:
-        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
-
     # Trainloader
     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                             hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
@@ -214,7 +210,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
             # Anchors
             if not opt.noautoanchor:
                 check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
-                model.half().float()  # pre-reduce anchor precision
+            model.half().float()  # pre-reduce anchor precision
+
+    # DDP mode
+    if cuda and rank != -1:
+        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
 
     # Model parameters
     hyp['box'] *= 3. / nl  # scale to layers
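Why move DDP below AutoAnchor: DDP broadcasts the module's parameters and buffers from rank 0 to all other ranks when the wrapper is constructed, and in this version of train.py the AutoAnchor step (check_anchors, which rewrites the anchor buffers in place) runs only in the rank-0 section. Wrapping the model in DDP before AutoAnchor therefore left the other ranks with stale anchors; constructing DDP after AutoAnchor and the half().float() precision round-trip lets the construction-time broadcast carry the finalized state everywhere. Below is a minimal sketch of that ordering, not the YOLOv5 code itself: the toy Linear module, the gloo backend, and the localhost rendezvous are illustrative assumptions.

import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP


def worker(rank, world_size):
    # Hypothetical localhost rendezvous for a 2-process CPU demo.
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '29500')
    dist.init_process_group('gloo', rank=rank, world_size=world_size)

    model = torch.nn.Linear(4, 4, bias=False)  # stand-in for the detection model

    # 1) Finalize model state on rank 0 first (stand-in for check_anchors,
    #    which rewrites the anchor buffers in place on rank 0 only).
    if rank == 0:
        with torch.no_grad():
            model.weight.fill_(1.25)  # 1.25 is exactly representable in fp16

    model.half().float()  # pre-reduce precision, as train.py does

    # 2) Only now construct DDP: construction broadcasts rank 0's parameters
    #    and buffers, so every rank picks up the finalized state.
    model = DDP(model)
    assert torch.all(model.module.weight == 1.25), f'rank {rank} missed the update'

    dist.destroy_process_group()


if __name__ == '__main__':
    mp.spawn(worker, args=(2,), nprocs=2)

Run it as a plain script: mp.spawn launches the two ranks, and the assert passing on both confirms the rank-0 mutation reached rank 1 via the DDP construction-time broadcast, which is the property the reordered train.py relies on.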