Update Yolov5_Deepsort/models/common.py
Yolov5_Deepsort/models/common.py  CHANGED  (+51 −51)
@@ -240,58 +240,58 @@ class AutoShape(nn.Module):
         return self

     @torch.no_grad()
-    def forward(self, imgs, size=640, augment=False, profile=False):
[… deleted lines 244-255 not rendered in the diff view …]
-            # 直接调用模型,不使用 amp.autocast (call the model directly, without amp.autocast)
[… deleted lines 257-266 not rendered in the diff view …]
-                im, f = np.asarray(im), getattr(im, 'filename', f) or f
-            files.append(Path(f).with_suffix('.jpg').name)
-            if im.shape[0] < 5:  # image in CHW
-                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
-            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
-            s = im.shape[:2]  # HWC
-            shape0.append(s)  # image shape
-            g = (size / max(s))  # gain
-            shape1.append([y * g for y in s])
-            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
-        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
-        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
-        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
-        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
-        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp32
-        t.append(time_synchronized())
-
-        # 直接调用模型,不使用 amp.autocast (call the model directly, without amp.autocast)
-        y = self.model(x, augment, profile)[0]  # forward
-        t.append(time_synchronized())
-
-        # Post-process
-        y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)  # NMS
-        for i in range(n):
-            scale_coords(shape1, y[i][:, :4], shape0[i])
-
-        t.append(time_synchronized())
-        return Detections(imgs, y, files, t, self.names, x.shape)
+    def forward(self, imgs, size=640, augment=False, profile=False):
+        # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
+        #   filename:   imgs = 'data/images/zidane.jpg'
+        #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
+        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
+        #   PIL:             = Image.open('image.jpg')  # HWC x(640,1280,3)
+        #   numpy:           = np.zeros((640,1280,3))  # HWC
+        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
+        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
+
+        t = [time_synchronized()]
+        p = next(self.model.parameters())  # for device and type
+        if isinstance(imgs, torch.Tensor):  # torch
+            # 直接调用模型,不使用 amp.autocast (call the model directly, without amp.autocast)
+            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
+
+        # Pre-process
+        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
+        shape0, shape1, files = [], [], []  # image and inference shapes, filenames
+        for i, im in enumerate(imgs):
+            f = f'image{i}'  # filename
+            if isinstance(im, str):  # filename or uri
+                im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
+            elif isinstance(im, Image.Image):  # PIL Image
+                im, f = np.asarray(im), getattr(im, 'filename', f) or f
+            files.append(Path(f).with_suffix('.jpg').name)
+            if im.shape[0] < 5:  # image in CHW
+                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
+            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
+            s = im.shape[:2]  # HWC
+            shape0.append(s)  # image shape
+            g = (size / max(s))  # gain
+            shape1.append([y * g for y in s])
+            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
+        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
+        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
+        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
+        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
+        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp32
+        t.append(time_synchronized())
+
+        # 直接调用模型,不使用 amp.autocast (call the model directly, without amp.autocast)
+        y = self.model(x, augment, profile)[0]  # forward
+        t.append(time_synchronized())
+
+        # Post-process
+        y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)  # NMS
+        for i in range(n):
+            scale_coords(shape1, y[i][:, :4], shape0[i])
+
+        t.append(time_synchronized())
+        return Detections(imgs, y, files, t, self.names, x.shape)
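
The comments in this commit say the model is now called directly instead of under amp.autocast. A minimal sketch of what that difference means in practice, assuming a CUDA device and a stand-in nn.Linear model (neither is part of this commit):

import torch

# Stand-in model; the real code calls self.model. Runs on CUDA if available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torch.nn.Linear(8, 2).to(device)
x = torch.rand(1, 8, device=device).type_as(next(model.parameters()))  # cast like the pre-process step

# Direct call (the pattern this commit uses): inference runs in the parameter dtype.
y_direct = model(x)

# autocast wrapper (the pattern the comments say is no longer used): PyTorch picks
# per-op precision on GPU, so an fp32 model can emit fp16 activations.
with torch.cuda.amp.autocast(enabled=(device == 'cuda')):
    y_amp = model(x)

print(y_direct.dtype, y_amp.dtype)  # e.g. torch.float32 vs torch.float16 on CUDA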
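The pre-process block scales each image by the gain g = size / max(s) and then rounds the common batch shape up to a stride multiple. A self-contained sketch of that arithmetic, where make_divisible is assumed to round up to the nearest multiple of the divisor, as in YOLOv5's utils:

import math
import numpy as np

def make_divisible(x, divisor):
    # assumed to mirror YOLOv5's utils: round x up to the nearest multiple of divisor
    return math.ceil(x / divisor) * divisor

size, stride = 640, 32
shape0 = [(480, 640), (720, 1280)]                           # original HW shapes
shape1 = [[d * (size / max(s)) for d in s] for s in shape0]  # scaled by gain g = size / max(s)
shape1 = [make_divisible(x, stride) for x in np.stack(shape1, 0).max(0)]
print(shape1)  # common inference shape for the batch: [480, 640]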
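Finally, a usage sketch for the patched AutoShape.forward. Loading through torch.hub and the sample file paths are illustrative assumptions; this repo would normally build the model through its own Yolov5_Deepsort code:

import cv2
import torch
from PIL import Image

# Illustrative: torch.hub returns an AutoShape-wrapped YOLOv5 model; this repo
# normally constructs the model from its own Yolov5_Deepsort/models code instead.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Any of the input kinds listed in the docstring comments are accepted:
results = model('data/images/zidane.jpg')                               # filename
results = model(cv2.imread('image.jpg')[:, :, ::-1])                    # OpenCV HWC, BGR to RGB
results = model([Image.open('image1.jpg'), Image.open('image2.jpg')])   # list of PIL images

results.print()  # forward() returns a Detections object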