import gradio as gr
from ultralytics import YOLO
import ai_gym  # AI Gym workout-monitoring module used for exercise rep counting
import cv2
import subprocess
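# Dependency sketch (assumption, not part of the runtime logic): based on the
# imports above, the Space is expected to provide roughly
#   gradio, ultralytics, opencv-python-headless
# plus the ai_gym module; exact pins would live in the Space's requirements.txt.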

# Function to upgrade pip
def upgrade_pip():
    subprocess.run(['pip', 'install', '--upgrade', 'pip'])

# Process video function
def process(video_path, pose_type):
    upgrade_pip()  # Upgrade pip before executing the main function
    model = YOLO("yolov8n-pose.pt")
    cap = cv2.VideoCapture(video_path)
    assert cap.isOpened(), "Error reading video file"
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    # Write annotated frames directly to the output video
    video_writer = cv2.VideoWriter("output_video.mp4",
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps,
                                   (w, h))

    gym_object = ai_gym.AIGym()  # init AI GYM module
    gym_object.set_args(line_thickness=2,
                        view_img=False,  # Set view_img to False to prevent displaying the video in real-time
                        pose_type=pose_type,
                        kpts_to_check=[6, 8, 10])  # right shoulder, elbow and wrist (COCO keypoint indices)

    frame_count = 0
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video processing has been successfully completed.")
            break
        frame_count += 1
        if frame_count % 5 == 0:  # Process every 5th frame to keep inference fast
            results = model.track(im0, verbose=False)  # Tracking is recommended for stable counting
            im0 = gym_object.start_counting(im0, results, frame_count)
            video_writer.write(im0)  # Append the annotated frame to the output video

    cap.release()
    video_writer.release()

    return "output_video.mp4"

title = "Push-up Counter"
description = "This app counts the number of push-ups in a video."
inputs = [gr.Video(label='Input Video'),  
          gr.Radio(["pullups", "pushups", "absworkout"], label="Pose Type")]
outputs = gr.Video(label='Output Video')
example_list = [['Examples/PULL-UPS.mp4'],['Examples/PUSH-UPS.mp4']]

# Create the Gradio demo
demo = gr.Interface(fn=process,
                    inputs=inputs,
                    outputs=outputs,
                    title=title,
                    description=description,
                    examples=example_list,
                    cache_examples=True,
                    )

# Launch the demo!
demo.launch(show_api=True)
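
# --- Usage sketch (not executed) ---------------------------------------------
# Since show_api=True, the endpoint can also be called programmatically with
# gradio_client. This is a minimal sketch, assuming the demo is reachable at the
# default local URL and a Gradio version where plain file paths are accepted as
# video inputs; newer releases may require gradio_client.handle_file().
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict(
#       "Examples/PUSH-UPS.mp4",   # input video (path taken from the examples above)
#       "pushups",                 # pose type
#       api_name="/predict",
#   )
#   print(result)  # path to the annotated output video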