import gradio as gr
from ultralytics import YOLO
from ultralytics.solutions import ai_gym
import cv2

def process(video_path):
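    """Count push-ups in an uploaded video with YOLOv8 pose estimation and return the annotated clip."""
    # yolov8n-pose.pt is the nano pose model; Ultralytics downloads the weights automatically on first use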
    model = YOLO("yolov8n-pose.pt")
    cap = cv2.VideoCapture(video_path)
    assert cap.isOpened(), "Error reading video file"
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    # Write annotated frames directly to the output file with OpenCV's VideoWriter
    video_writer = cv2.VideoWriter("output_video.mp4",
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps,
                                   (w, h))

    gym_object = ai_gym.AIGym()  # init AI GYM module
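    # Keypoint indices 6, 8 and 10 are the right shoulder, elbow and wrist in the COCO keypoint order used by YOLOv8-pose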
    gym_object.set_args(line_thickness=2,
                        view_img=False,  # Set view_img to False to prevent displaying the video in real-time
                        pose_type="pushup",
                        kpts_to_check=[6, 8, 10])

    frame_count = 0
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video frame is empty or video processing has been successfully completed.")
            break
        frame_count += 1
        if frame_count % 5 == 0:  # Run pose tracking and rep counting on every 5th frame to save compute
            results = model.track(im0, verbose=False)  # Tracking keeps person identities stable across frames
            im0 = gym_object.start_counting(im0, results, frame_count)
        video_writer.write(im0)  # Write every frame so the output keeps the source duration

    cap.release()
    video_writer.release()

    return "output_video.mp4"

# Create the Gradio demo
demo = gr.Interface(fn=process,
                    inputs=gr.Video(label='Input Video'),
                    outputs=gr.Video(label='Processed Video'))

# Launch the demo!
demo.launch(show_api=False)