# Source: Hugging Face Space file "main.py" (revision 40e0d6d, 4.47 kB)
# (Viewer chrome from the original page converted to a comment so the file parses.)
import gradio as gr
from ultralytics import YOLO
from ultralytics.solutions import ai_gym
import cv2
def process(video_path):
    """Count exercise repetitions in a video with YOLOv8 pose tracking.

    Runs pose estimation + tracking on every frame, feeds the results to the
    Ultralytics AIGym rep counter, and writes the annotated frames to a new
    video file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video ("output_video.mp4").

    Raises:
        RuntimeError: If the input video cannot be opened.
    """
    model = YOLO("yolov8n-pose.pt")
    cap = cv2.VideoCapture(video_path)
    # Raise instead of assert: asserts are stripped under `python -O`.
    if not cap.isOpened():
        raise RuntimeError("Error reading video file")
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH,
                                           cv2.CAP_PROP_FRAME_HEIGHT,
                                           cv2.CAP_PROP_FPS))
    video_writer = cv2.VideoWriter("output_video.mp4",
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps,
                                   (w, h))
    gym_object = ai_gym.AIGym()  # init AI GYM module
    gym_object.set_args(line_thickness=2,
                        view_img=False,  # don't display frames during processing
                        # NOTE(review): UI title says "Push-up Counter" but this
                        # counts pull-ups — confirm which exercise is intended.
                        pose_type="pullup",
                        kpts_to_check=[6, 8, 10])
    frame_count = 0
    try:
        while cap.isOpened():
            success, im0 = cap.read()
            if not success:
                print("Video processing has been successfully completed.")
                break
            frame_count += 1
            # persist=True keeps track IDs stable across successive
            # frame-by-frame track() calls (recommended for streaming input).
            results = model.track(im0, persist=True, verbose=False)
            im0 = gym_object.start_counting(im0, results, frame_count)
            video_writer.write(im0)
    finally:
        # Release resources even if tracking/counting raises mid-video,
        # so the partially-written output file is properly closed.
        cap.release()
        video_writer.release()
        cv2.destroyAllWindows()
    return "output_video.mp4"
# UI metadata shown in the Gradio interface.
title = "Push-up Counter"
description = "This app counts the number of push-ups in a video."

# Create the Gradio demo. `title` and `description` were previously defined
# but never passed to the Interface; wire them in so they actually appear.
demo = gr.Interface(fn=process,
                    inputs=gr.Video(label='Input Video'),
                    outputs=gr.Video(label='Output Video'),
                    title=title,
                    description=description)

# Launch the demo!
demo.launch(show_api=True)
# import gradio as gr
# from ultralytics import YOLO
# from ultralytics.solutions import ai_gym
# import cv2
# import tempfile
# from PIL import Image
# import subprocess
# # Function to upgrade pip
# def upgrade_pip():
# subprocess.run(['pip', 'install', '--upgrade', 'pip'])
# # Function to process video
# def process(video_path):
# upgrade_pip() # Upgrade pip before executing the main function
# model = YOLO("yolov8n-pose.pt")
# cap = cv2.VideoCapture(video_path)
# assert cap.isOpened(), "Error reading video file"
# w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
# temp_dir = tempfile.mkdtemp() # Create a temporary directory to store processed frames
# video_writer = cv2.VideoWriter("output_video.mp4",
# cv2.VideoWriter_fourcc(*'mp4v'),
# fps,
# (w, h))
# gym_object = ai_gym.AIGym() # init AI GYM module
# gym_object.set_args(line_thickness=2,
# view_img=False, # Set view_img to False to prevent displaying the video in real-time
# pose_type="pushup",
# kpts_to_check=[6, 8, 10])
# frame_count = 0
# while cap.isOpened():
# success, im0 = cap.read()
# if not success:
# print("Video frame is empty or video processing has been successfully completed.")
# break
# frame_count += 1
# if frame_count % 5 == 0: # Process every 5th frame
# results = model.track(im0, verbose=False) # Tracking recommended
# im0 = gym_object.start_counting(im0, results, frame_count)
# # Save processed frame as an image in the temporary directory
# cv2.imwrite(f"{temp_dir}/{frame_count}.jpg", im0)
# # Use PIL to create the final video from the processed frames
# images = [Image.open(f"{temp_dir}/{i}.jpg") for i in range(1, frame_count + 1)]
# images[0].save("output_video.mp4", save_all=True, append_images=images[1:], duration=1000/fps, loop=0)
# cap.release()
# cv2.destroyAllWindows()
# return "output_video.mp4"
# # Create the Gradio demo
# demo = gr.Interface(fn=process,
# inputs=gr.Video(label='Input Video'),
# outputs=gr.Video(label='Processed Video'))
# # Launch the demo!
# demo.launch(show_api=False)