SakuraD committed
Commit 0599b47 · 1 Parent(s): 0f1b479
Files changed (1):
  1. app.py +6 -5
app.py CHANGED
@@ -54,17 +54,18 @@ def load_video(video_path):
          img = Image.fromarray(vr[frame_index].asnumpy())
          images_group.append(img)
      torch_imgs = transform(images_group)
+
+     # The model expects inputs of shape: B x C x T x H x W
+     TC, H, W = torch_imgs.shape
+     torch_imgs = torch_imgs.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4)
+
      return torch_imgs


  def inference(video):
      vid = load_video(video)

-     # The model expects inputs of shape: B x C x T x H x W
-     TC, H, W = vid.shape
-     inputs = vid.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4)
-
-     prediction = model(inputs)
+     prediction = model(vid)
      prediction = F.softmax(prediction, dim=1).flatten()

      return {kinetics_id_to_classname[str(i)]: float(prediction[i]) for i in range(400)}
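
The commit moves the B x C x T x H x W reshape from inference into load_video, so inference can pass the loaded tensor straight to the model. The following sketch is not part of the commit; it only illustrates what that reshape does to the stacked frame tensor, with the frame count (8) and spatial size (224) assumed for the example rather than taken from the app's transform.

# Standalone illustration of the reshape now done inside load_video.
import torch

T, H, W = 8, 224, 224                 # assumed: 8 sampled frames at 224x224
torch_imgs = torch.rand(T * 3, H, W)  # stand-in for transform(images_group),
                                      # which stacks T RGB frames along dim 0

# (T*C, H, W) -> (1, T, C, H, W) -> (1, C, T, H, W), i.e. B x C x T x H x W
TC, H, W = torch_imgs.shape
inputs = torch_imgs.reshape(1, TC // 3, 3, H, W).permute(0, 2, 1, 3, 4)

print(inputs.shape)  # torch.Size([1, 3, 8, 224, 224])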