randomshit11 committed
Commit 0c9d70a · verified · 1 parent: c666713

Update app.py

Files changed (1)
  app.py  +12 -10
app.py CHANGED
@@ -7,6 +7,7 @@ import math
 from tensorflow.keras.models import Model
 from tensorflow.keras.layers import (LSTM, Dense, Dropout, Input, Flatten,
                                      Bidirectional, Permute, multiply)
+from moviepy.editor import VideoClip
 
 # Load the pose estimation model from Mediapipe
 mp_pose = mp.solutions.pose
@@ -69,10 +70,7 @@ class VideoProcessor:
             temp_file.write(video_file.read())
         # Now we can open the video file using cv2.VideoCapture()
         cap = cv2.VideoCapture(filename)
-        output_filename = "processed_video.mp4"
-        frame_width = int(cap.get(3))
-        frame_height = int(cap.get(4))
-        out = cv2.VideoWriter(output_filename, cv2.VideoWriter_fourcc(*'mp4v'), 30, (frame_width,frame_height))
+        frames = []
         while cap.isOpened():
             ret, frame = cap.read()
             if not ret:
@@ -80,13 +78,11 @@ class VideoProcessor:
             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             results = self.pose.process(frame_rgb)
             processed_frame = self.process_frame(frame, results)
-            out.write(processed_frame)
+            frames.append(processed_frame)
         cap.release()
-        out.release()
         # Remove the temporary file
         os.remove(filename)
-        print("Processed video saved at:", output_filename) # Debug print
-        return output_filename
+        return frames
 
     def process_frame(self, frame, results):
         # Process the frame using the `process` function
@@ -269,8 +265,14 @@ def main():
     video_file = st.file_uploader("Upload a video file", type=["mp4", "avi"])
     if video_file is not None:
         video_processor = VideoProcessor()
-        processed_video_file = video_processor.process_video(video_file)
-        st.video(processed_video_file)
+        frames = video_processor.process_video(video_file)
+
+        def make_frame(t):
+            frame_index = int(t * 30) # Assuming 30 frames per second
+            return frames[frame_index]
+
+        video = VideoClip(make_frame, duration=len(frames) / 30)
+        st.video(video)
 
 if __name__ == "__main__":
     main()
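
Context for the change above, not part of the commit: Streamlit's st.video() expects a file path, URL, or raw bytes rather than a MoviePy clip object, so the frames list returned by the new process_video() will likely need to be encoded to a video file before display. Below is a minimal sketch of one way to do that; the helper name show_frames, the output path "processed_video.mp4", and the 30 fps value are assumptions for illustration, and MoviePy's ImageSequenceClip stands in for the VideoClip/make_frame pair used in the diff.

import cv2
import streamlit as st
from moviepy.editor import ImageSequenceClip

def show_frames(frames, fps=30, path="processed_video.mp4"):
    # Hypothetical helper, not in app.py. OpenCV frames are BGR; MoviePy expects RGB.
    rgb_frames = [cv2.cvtColor(f, cv2.COLOR_BGR2RGB) for f in frames]
    # Encode the frame list to an H.264 MP4 on disk.
    clip = ImageSequenceClip(rgb_frames, fps=fps)
    clip.write_videofile(path, codec="libx264", audio=False)
    # Hand the encoded bytes to Streamlit for playback.
    with open(path, "rb") as f:
        st.video(f.read())

Following the upload flow in main(), usage would be show_frames(video_processor.process_video(video_file)).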