# Spaces:
# Runtime error
# Runtime error
import tempfile
import cv2
import dlib
import numpy as np
from scipy.spatial import distance as dist
from imutils import face_utils
import gradio as gr
def detect_head_posture(video_path):
    """Classify head posture per frame of a video via dlib facial landmarks.

    For each frame, detects faces, computes the ratio of jaw height
    (chin point 8 to the midpoint of jawline points 1 and 15) to jaw
    width (distance between points 1 and 15), and labels the frame
    "Upright" when the ratio exceeds 0.5, else "Slumped". Landmark dots
    are drawn onto every frame and the annotated video is written to a
    temporary .avi file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Tuple of (annotated_video_path, dominant_posture_label). The
        label is "No face detected" when no landmarks were found in any
        frame (the original code raised ValueError in that case).
    """
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("assets/models/shape_predictor_68_face_landmarks.dat")
    cap = cv2.VideoCapture(video_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Preserve the source frame rate so the output plays at the correct
    # speed; fall back to 20 fps when the container does not report one.
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 20.0
    with tempfile.NamedTemporaryFile(delete=False, suffix='.avi') as temp_file:
        out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*'XVID'), fps, (frame_width, frame_height))
        posture_data = []
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                for rect in detector(gray, 0):
                    shape = face_utils.shape_to_np(predictor(gray, rect))
                    # Jawline extremes (points 1 and 15) give the face width.
                    jaw_width = dist.euclidean(shape[1], shape[15])
                    # Chin (point 8) to the jawline midpoint gives the height.
                    jaw_height = dist.euclidean(shape[8], (shape[1] + shape[15]) / 2)
                    posture = "Upright" if jaw_height / jaw_width > 0.5 else "Slumped"
                    posture_data.append(posture)
                    for (x, y) in shape:
                        cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
                out.write(frame)
        finally:
            # Always release the capture and writer, even if decoding or
            # landmark detection fails mid-stream.
            cap.release()
            out.release()
    # Guard the no-face case: max() over an empty sequence raises ValueError.
    if not posture_data:
        return temp_file.name, "No face detected"
    posture_type = max(set(posture_data), key=posture_data.count)
    return temp_file.name, posture_type
def create_head_posture_tab():
    """Assemble the Gradio layout for the head-posture analysis tab.

    Builds two side-by-side columns — the left holds the input video
    with Clear/Analyze buttons, the right shows the processed video and
    the dominant posture label — and wires the buttons to
    detect_head_posture.
    """
    with gr.Row():
        with gr.Column(scale=1):
            video_input = gr.Video(label="Input Video")
            with gr.Row():
                btn_clear = gr.Button("Clear")
                btn_analyze = gr.Button("Analyze", elem_classes="submit")
        with gr.Column(scale=1, elem_classes="dl4"):
            video_output = gr.Video(label="Processed Video", elem_classes="video2")
            posture_label = gr.Label(label="Posture Type")
    # Analyze: run the detector and fill both output components.
    btn_analyze.click(
        detect_head_posture,
        inputs=video_input,
        outputs=[video_output, posture_label],
        queue=True,
    )
    # Clear: reset input and both outputs to empty.
    btn_clear.click(
        lambda: (None, None, None),
        outputs=[video_input, video_output, posture_label],
        queue=True,
    )
    gr.Examples(["./assets/videos/fitness.mp4"], inputs=[video_input])