import gradio as gr
from ultralytics import YOLO
from ultralytics.solutions import ai_gym
import cv2

def process(video_path):
    model = YOLO("yolov8n-pose.pt")  # YOLOv8 nano pose model (weights auto-download on first run)
    cap = cv2.VideoCapture(video_path)
    assert cap.isOpened(), "Error reading video file"
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    video_writer = cv2.VideoWriter("output_video.mp4",
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps,
                                   (w, h))

    gym_object = ai_gym.AIGym()  # init the AI Gym workout-counting module
    gym_object.set_args(line_thickness=2,
                        view_img=False,  # keep False so no display window is opened (headless environments)
                        pose_type="pullup",
                        kpts_to_check=[6, 8, 10])  # COCO keypoints: right shoulder, right elbow, right wrist

    frame_count = 0
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video processing has been successfully completed.")
            break
        frame_count += 1
        results = model.track(im0, verbose=False, persist=True)  # tracking recommended; persist keeps track IDs across frames
        im0 = gym_object.start_counting(im0, results, frame_count)
        video_writer.write(im0)

    cap.release()
    video_writer.release()
    cv2.destroyAllWindows()

    return "output_video.mp4"

title = "Push-up Counter"
description = "This app counts the number of push-ups in a video."
# inputs = gr.inputs.Video(label='Input Video')
# outputs = gr.outputs.Video(label='Processed Video')
# example_list = ['pullups.mp4']

# Create the Gradio demo
demo = gr.Interface(fn=process,
                    inputs=gr.Video(label='Input Video'),  
                    outputs=gr.Video(label='Output Video')
                    # title=title,
                    # description=description,
                    # examples=example_list
                    )

# Launch the demo!
demo.launch(show_api=True)
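
# Sketch of calling the exposed API once the app is running locally. The URL,
# the "/predict" endpoint name, and the sample filename are assumptions based on
# Gradio's defaults, not taken from this repo; depending on the gradio_client
# version, the video path may need to be wrapped with handle_file().
#
# from gradio_client import Client
# client = Client("http://127.0.0.1:7860/")
# result = client.predict("pullups.mp4", api_name="/predict")
# print(result)  # path to the processed output video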








# import gradio as gr
# from ultralytics import YOLO
# from ultralytics.solutions import ai_gym
# import cv2
# import tempfile
# from PIL import Image
# import subprocess

# # Function to upgrade pip
# def upgrade_pip():
#     subprocess.run(['pip', 'install', '--upgrade', 'pip'])

# # Function to process video
# def process(video_path):
#     upgrade_pip()  # Upgrade pip before executing the main function
#     model = YOLO("yolov8n-pose.pt")
#     cap = cv2.VideoCapture(video_path)
#     assert cap.isOpened(), "Error reading video file"
#     w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

#     temp_dir = tempfile.mkdtemp()  # Create a temporary directory to store processed frames
#     video_writer = cv2.VideoWriter("output_video.mp4",
#                                    cv2.VideoWriter_fourcc(*'mp4v'),
#                                    fps,
#                                    (w, h))

#     gym_object = ai_gym.AIGym()  # init AI GYM module
#     gym_object.set_args(line_thickness=2,
#                         view_img=False,  # Set view_img to False to prevent displaying the video in real-time
#                         pose_type="pushup",
#                         kpts_to_check=[6, 8, 10])

#     frame_count = 0
#     while cap.isOpened():
#         success, im0 = cap.read()
#         if not success:
#             print("Video frame is empty or video processing has been successfully completed.")
#             break
#         frame_count += 1
#         if frame_count % 5 == 0:  # Process every 5th frame
#             results = model.track(im0, verbose=False)  # Tracking recommended
#             im0 = gym_object.start_counting(im0, results, frame_count)
#             # Save processed frame as an image in the temporary directory
#             cv2.imwrite(f"{temp_dir}/{frame_count}.jpg", im0)

#     # Use PIL to create the final video from the processed frames
#     images = [Image.open(f"{temp_dir}/{i}.jpg") for i in range(1, frame_count + 1)]
#     images[0].save("output_video.mp4", save_all=True, append_images=images[1:], duration=1000/fps, loop=0)

#     cap.release()
#     cv2.destroyAllWindows()

#     return "output_video.mp4"

# # Create the Gradio demo
# demo = gr.Interface(fn=process,
#                     inputs=gr.Video(label='Input Video'),  
#                     outputs=gr.Video(label='Processed Video'))

# # Launch the demo!
# demo.launch(show_api=False)