Commit
·
43a616a
1
Parent(s):
7f8471e
PyTorch 1.6.0 compatibility updates
Browse files- detect.py +1 -1
- models/yolo.py +3 -2
- test.py +3 -3
detect.py
CHANGED
@@ -154,7 +154,7 @@ if __name__ == '__main__':
|
|
154 |
|
155 |
with torch.no_grad():
|
156 |
if opt.update: # update all models (to fix SourceChangeWarning)
|
157 |
-
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
|
158 |
detect()
|
159 |
strip_optimizer(opt.weights)
|
160 |
else:
|
|
|
154 |
|
155 |
with torch.no_grad():
|
156 |
if opt.update: # update all models (to fix SourceChangeWarning)
|
157 |
+
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
|
158 |
detect()
|
159 |
strip_optimizer(opt.weights)
|
160 |
else:
|
models/yolo.py
CHANGED
@@ -90,9 +90,9 @@ class Model(nn.Module):
|
|
90 |
yi = self.forward_once(xi)[0] # forward
|
91 |
# cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
|
92 |
yi[..., :4] /= si # de-scale
|
93 |
-
if fi is 2:
|
94 |
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
|
95 |
-
elif fi is 3:
|
96 |
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
|
97 |
y.append(yi)
|
98 |
return torch.cat(y, 1), None # augmented inference, train
|
@@ -148,6 +148,7 @@ class Model(nn.Module):
|
|
148 |
print('Fusing layers... ', end='')
|
149 |
for m in self.model.modules():
|
150 |
if type(m) is Conv:
|
|
|
151 |
m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
|
152 |
m.bn = None # remove batchnorm
|
153 |
m.forward = m.fuseforward # update forward
|
|
|
90 |
yi = self.forward_once(xi)[0] # forward
|
91 |
# cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
|
92 |
yi[..., :4] /= si # de-scale
|
93 |
+
if fi == 2:
|
94 |
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
|
95 |
+
elif fi == 3:
|
96 |
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
|
97 |
y.append(yi)
|
98 |
return torch.cat(y, 1), None # augmented inference, train
|
|
|
148 |
print('Fusing layers... ', end='')
|
149 |
for m in self.model.modules():
|
150 |
if type(m) is Conv:
|
151 |
+
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatability
|
152 |
m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
|
153 |
m.bn = None # remove batchnorm
|
154 |
m.forward = m.fuseforward # update forward
|
test.py
CHANGED
@@ -148,8 +148,8 @@ def test(data,
|
|
148 |
|
149 |
# Per target class
|
150 |
for cls in torch.unique(tcls_tensor):
|
151 |
-
ti = (cls == tcls_tensor).nonzero().view(-1) # prediction indices
|
152 |
-
pi = (cls == pred[:, 5]).nonzero().view(-1) # target indices
|
153 |
|
154 |
# Search for detections
|
155 |
if pi.shape[0]:
|
@@ -157,7 +157,7 @@ def test(data,
|
|
157 |
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
|
158 |
|
159 |
# Append detections
|
160 |
-
for j in (ious > iouv[0]).nonzero():
|
161 |
d = ti[i[j]] # detected target
|
162 |
if d not in detected:
|
163 |
detected.append(d)
|
|
|
148 |
|
149 |
# Per target class
|
150 |
for cls in torch.unique(tcls_tensor):
|
151 |
+
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
|
152 |
+
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
|
153 |
|
154 |
# Search for detections
|
155 |
if pi.shape[0]:
|
|
|
157 |
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
|
158 |
|
159 |
# Append detections
|
160 |
+
for j in (ious > iouv[0]).nonzero(as_tuple=False):
|
161 |
d = ti[i[j]] # detected target
|
162 |
if d not in detected:
|
163 |
detected.append(d)
|