🔨 [Update] gradient scaler
- yolo/lazy.py +1 -0
- yolo/tools/loss_functions.py +9 -7
yolo/lazy.py CHANGED

@@ -23,6 +23,7 @@ def main(cfg: Config):
         callbacks=callbacks,
         logger=loggers,
         log_every_n_steps=1,
+        gradient_clip_val=10,
     )
 
     match cfg.task.task:
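For context, `gradient_clip_val` is a standard PyTorch Lightning `Trainer` argument that clips the gradient norm before each optimizer step. A minimal sketch of the updated construction is below; only `callbacks`, `loggers`, `log_every_n_steps`, and the new `gradient_clip_val` appear in the hunk, so the surrounding function name and the `max_epochs` argument are illustrative assumptions, not code from the repo.

```python
# Sketch of the updated Trainer construction in yolo/lazy.py.
# Only callbacks, loggers, log_every_n_steps, and gradient_clip_val are
# visible in the diff; the rest is an assumed placeholder.
from lightning import Trainer


def build_trainer(callbacks, loggers, max_epochs: int = 100) -> Trainer:
    # gradient_clip_val=10 tells Lightning to clip the gradient norm to 10
    # before every optimizer step, guarding against exploding gradients.
    return Trainer(
        max_epochs=max_epochs,  # assumed placeholder argument
        callbacks=callbacks,
        logger=loggers,
        log_every_n_steps=1,
        gradient_clip_val=10,
    )
```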
yolo/tools/loss_functions.py CHANGED

@@ -124,13 +124,15 @@ class DualLoss:
         aux_iou, aux_dfl, aux_cls = self.loss(aux_predicts, targets)
         main_iou, main_dfl, main_cls = self.loss(main_predicts, targets)
 
-        (seven removed lines; their contents are not captured in this view)
+        total_loss = [
+            self.iou_rate * (aux_iou * self.aux_rate + main_iou),
+            self.dfl_rate * (aux_dfl * self.aux_rate + main_dfl),
+            self.cls_rate * (aux_cls * self.aux_rate + main_cls),
+        ]
+        loss_dict = dict()
+        for name, value in zip(["Box", "DFL", "BCE"], total_loss):
+            loss_dict[f"Loss/{name}Loss"] = value.detach()
+        return sum(total_loss), loss_dict
 
 
 def create_loss_function(cfg: Config, vec2box) -> DualLoss:
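The new block weights the auxiliary head's losses by `self.aux_rate`, scales each component (IoU/box, DFL, classification) by its configured rate, and returns the scalar sum together with a dict of detached per-component values for logging. A standalone sketch of that aggregation follows; the default rate values and the dummy inputs are illustrative assumptions, not values taken from the repo's config.

```python
import torch


def aggregate_dual_loss(
    aux: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    main: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    iou_rate: float = 7.5,   # illustrative weights; the real values come from cfg
    dfl_rate: float = 1.5,
    cls_rate: float = 0.5,
    aux_rate: float = 0.25,
) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
    """Mirror the new DualLoss aggregation: weight aux losses by aux_rate,
    scale each component, and return (total scalar, detached logging dict)."""
    aux_iou, aux_dfl, aux_cls = aux
    main_iou, main_dfl, main_cls = main
    total_loss = [
        iou_rate * (aux_iou * aux_rate + main_iou),
        dfl_rate * (aux_dfl * aux_rate + main_dfl),
        cls_rate * (aux_cls * aux_rate + main_cls),
    ]
    # Detach the logged copies so logging never keeps the autograd graph alive.
    loss_dict = {
        f"Loss/{name}Loss": value.detach()
        for name, value in zip(["Box", "DFL", "BCE"], total_loss)
    }
    return sum(total_loss), loss_dict


# Usage with dummy scalar losses:
aux = tuple(torch.rand(()) for _ in range(3))
main = tuple(torch.rand(()) for _ in range(3))
total, logs = aggregate_dual_loss(aux, main)
```

Returning the detached per-component values alongside the summed loss lets the Lightning logger report `Loss/BoxLoss`, `Loss/DFLLoss`, and `Loss/BCELoss` separately while backpropagation runs only through the scalar sum.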