[Fix] #32 bugs, turn 8400->auto anchor size
yolo/utils/bounding_box_utils.py

@@ -166,15 +166,14 @@ class BoxMatcher:
         Get the (predicted class' probabilities) corresponding to the target classes across all anchors

         Args:
-            predict_cls [batch x
+            predict_cls [batch x anchors x class]: The predicted probabilities for each class across each anchor.
             target_cls [batch x targets]: The class index for each target.

         Returns:
             [batch x targets x anchors]: The probabilities from `pred_cls` corresponding to the class indices specified in `target_cls`.
         """
-        # TODO: Turn 8400 to HW
-        target_cls = target_cls.expand(-1, -1, 8400)
         predict_cls = predict_cls.transpose(1, 2)
+        target_cls = target_cls.expand(-1, -1, predict_cls.size(2))
         cls_probabilities = torch.gather(predict_cls, 1, target_cls)
         return cls_probabilities
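For context on this change: 8400 is the anchor count only for a 640x640 input (80*80 + 40*40 + 20*20 grid cells at strides 8/16/32), so the hard-coded expand breaks as soon as the image size changes. Below is a minimal, self-contained sketch of the patched gather; the batch/target sizes and the 480x480-style anchor count are illustrative only and are not taken from the repo's loss pipeline:

    import torch

    # Illustrative sizes only: a 480x480 input at strides 8/16/32 yields
    # 60*60 + 30*30 + 15*15 = 4725 anchors, not the 8400 of a 640x640 input.
    batch, anchors, classes, targets = 2, 4725, 80, 5

    predict_cls = torch.rand(batch, anchors, classes)             # [batch x anchors x class]
    target_cls = torch.randint(0, classes, (batch, targets, 1))   # [batch x targets x 1], class index per target

    predict_cls = predict_cls.transpose(1, 2)                     # [batch x class x anchors]
    target_cls = target_cls.expand(-1, -1, predict_cls.size(2))   # [batch x targets x anchors], no hard-coded 8400
    cls_probabilities = torch.gather(predict_cls, 1, target_cls)  # [batch x targets x anchors]

    print(cls_probabilities.shape)  # torch.Size([2, 5, 4725])

With the old expand(-1, -1, 8400), the same gather would raise a size-mismatch error for any input that does not produce exactly 8400 anchors; sizing the expand from predict_cls.size(2) keeps the index tensor in step with the prediction tensor.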
yolo/utils/model_utils.py

@@ -69,7 +69,7 @@ def create_scheduler(optimizer: Optimizer, schedule_cfg: SchedulerConfig) -> _LR
     schedule = scheduler_class(optimizer, **schedule_cfg.args)
     if hasattr(schedule_cfg, "warmup"):
         wepoch = schedule_cfg.warmup.epochs
-        lambda1 = lambda epoch: 0.1 + 0.9 * (epoch
+        lambda1 = lambda epoch: 0.1 + 0.9 * (epoch / wepoch) if epoch < wepoch else 1
         lambda2 = lambda epoch: 10 - 9 * (epoch / wepoch) if epoch < wepoch else 1
         warmup_schedule = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2, lambda1])
         schedule = SequentialLR(optimizer, schedulers=[warmup_schedule, schedule], milestones=[2])
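This change completes the warmup ramp for the first and third parameter groups: lambda1 now scales the learning rate linearly from 0.1x to 1.0x of the base LR over schedule_cfg.warmup.epochs, mirroring lambda2's decay from 10x down to 1x. A minimal sketch of how the two lambdas behave inside SequentialLR, assuming a plain SGD optimizer with three dummy parameter groups and a StepLR stand-in for the configured main scheduler (none of this reproduces the project's real config):

    import torch
    from torch.optim import SGD
    from torch.optim.lr_scheduler import LambdaLR, SequentialLR, StepLR

    # Three parameter groups, matching lr_lambda=[lambda1, lambda2, lambda1] in the diff.
    params = [torch.nn.Parameter(torch.zeros(1)) for _ in range(3)]
    optimizer = SGD([{"params": [p]} for p in params], lr=0.01)

    wepoch = 3  # stand-in for schedule_cfg.warmup.epochs
    lambda1 = lambda epoch: 0.1 + 0.9 * (epoch / wepoch) if epoch < wepoch else 1  # 0.1x -> 1.0x ramp
    lambda2 = lambda epoch: 10 - 9 * (epoch / wepoch) if epoch < wepoch else 1     # 10x  -> 1.0x decay

    warmup_schedule = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2, lambda1])
    main_schedule = StepLR(optimizer, step_size=10, gamma=0.5)  # placeholder for scheduler_class(optimizer, **schedule_cfg.args)
    schedule = SequentialLR(optimizer, schedulers=[warmup_schedule, main_schedule], milestones=[2])

    for epoch in range(5):
        print(epoch, [round(group["lr"], 4) for group in optimizer.param_groups])
        optimizer.step()
        schedule.step()

With wepoch = 3, lambda1 evaluates to 0.1, 0.4 and 0.7 at epochs 0-2, i.e. a linear climb toward the base learning rate, while lambda2 starts high and decays toward it.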