Initial commit for ai-digital-coach app
- app.py +154 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,154 @@
import gradio as gr
import cv2
import numpy as np
import mediapipe as mp

# --------------- MODULE 1: Utility Function to Calculate Angle ---------------
def calculate_angle(a, b, c):
    a = np.array(a)
    b = np.array(b)
    c = np.array(c)

    ba = a - b
    bc = c - b

    cos_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
    angle = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))

    return angle

# --------------- MODULE 2: Squat Counting Function (Refactored for Live Updates) ---------------
def count_squats_live(video):
    mp_pose = mp.solutions.pose
    pose = mp_pose.Pose(min_detection_confidence=0.5, model_complexity=1)
    mp_drawing = mp.solutions.drawing_utils

    cap = cv2.VideoCapture(video)
    squat_count = 0
    is_squat_down = False

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = pose.process(frame_rgb)

        if results.pose_landmarks:
            landmarks = results.pose_landmarks.landmark

            hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,
                   landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]
            knee = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x,
                    landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]
            ankle = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x,
                     landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]

            squat_angle = calculate_angle(hip, knee, ankle)

            if squat_angle < 80 and not is_squat_down:
                is_squat_down = True
            elif squat_angle > 150 and is_squat_down:
                squat_count += 1
                is_squat_down = False

            mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

            # Draw the angle at the knee
            cv2.putText(frame, f"Angle: {int(squat_angle)}", (10, 140),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.line(frame, (int(hip[0] * frame.shape[1]), int(hip[1] * frame.shape[0])),
                     (int(knee[0] * frame.shape[1]), int(knee[1] * frame.shape[0])), (0, 0, 255), 2)
            cv2.line(frame, (int(knee[0] * frame.shape[1]), int(knee[1] * frame.shape[0])),
                     (int(ankle[0] * frame.shape[1]), int(ankle[1] * frame.shape[0])), (0, 0, 255), 2)

        # Display counts and legend
        cv2.putText(frame, f"Squats: {squat_count}", (10, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    cap.release()

# --------------- MODULE 3: Pushup Counting Function (Refactored for Live Updates) ---------------
def count_pushups_live(video):
    mp_pose = mp.solutions.pose
    pose = mp_pose.Pose(min_detection_confidence=0.5, model_complexity=1)
    mp_drawing = mp.solutions.drawing_utils

    cap = cv2.VideoCapture(video)
    pushup_count = 0
    is_pushup_down = False

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = pose.process(frame_rgb)

        if results.pose_landmarks:
            landmarks = results.pose_landmarks.landmark

            shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,
                        landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]
            elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x,
                     landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]
            wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
                     landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]

            pushup_angle = calculate_angle(shoulder, elbow, wrist)

            if pushup_angle < 90 and not is_pushup_down:
                is_pushup_down = True
            elif pushup_angle > 160 and is_pushup_down:
                pushup_count += 1
                is_pushup_down = False

            mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

            # Draw the angle at the elbow
            cv2.putText(frame, f"Angle: {int(pushup_angle)}", (10, 140),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.line(frame, (int(shoulder[0] * frame.shape[1]), int(shoulder[1] * frame.shape[0])),
                     (int(elbow[0] * frame.shape[1]), int(elbow[1] * frame.shape[0])), (0, 0, 255), 2)
            cv2.line(frame, (int(elbow[0] * frame.shape[1]), int(elbow[1] * frame.shape[0])),
                     (int(wrist[0] * frame.shape[1]), int(wrist[1] * frame.shape[0])), (0, 0, 255), 2)

        # Display counts
        cv2.putText(frame, f"Pushups: {pushup_count}", (10, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    cap.release()

# --------------- MODULE 4: Gradio Interface ---------------
def process_video_live(video, exercise):
    if exercise == "squats":
        generator = count_squats_live(video)
    elif exercise == "pushups":
        generator = count_pushups_live(video)

    # Loop through the generator and yield each frame
    for frame in generator:
        yield frame

interface = gr.Interface(
    fn=process_video_live,
    inputs=[
        gr.Video(format="mp4", sources=["upload"], label="Upload Video", height=600),
        gr.Radio(["squats", "pushups"], value="squats", label="Choose Exercise")
    ],
    outputs=gr.Image(label="Live analysis", height=600),
    flagging_mode="never",
    title="AI-Powered Digital Coach",
    description="<div style='text-align: center;'>Upload a video to count squats or pushups!</div>",
    live=False
)

# Launch the app
if __name__ == "__main__":
    interface.launch()
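For reference, a quick numeric check of the calculate_angle helper above, using illustrative landmark positions that are not part of the committed file: with the hip directly above the knee and the ankle directly beside it, the two limb vectors are perpendicular, so the helper should report 90 degrees. A squat rep is then counted with simple hysteresis, only after the knee angle drops below 80 and rises back above 150.

import numpy as np

# Illustrative normalized landmark positions (hypothetical values, chosen for an easy check):
hip, knee, ankle = [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]

ba = np.array(hip) - np.array(knee)    # vector knee -> hip = (0, -1)
bc = np.array(ankle) - np.array(knee)  # vector knee -> ankle = (1, 0)
cos_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
print(np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0))))  # prints 90.0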
requirements.txt
ADDED
@@ -0,0 +1,4 @@
mediapipe==0.10.18
opencv-python==4.10.0.84
numpy==1.26.4
gradio==5.6.0
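A minimal local smoke test for the generator functions is sketched below. It assumes the pinned dependencies above are installed and that a short clip named sample_squats.mp4 sits next to app.py; the clip name is hypothetical and neither it nor this snippet is part of the commit.

from app import count_squats_live

# Pull the first few annotated RGB frames out of the generator and report their shape and dtype;
# "sample_squats.mp4" is a placeholder for any short squat video available locally.
for i, frame in enumerate(count_squats_live("sample_squats.mp4")):
    print(f"frame {i}: shape={frame.shape}, dtype={frame.dtype}")
    if i == 4:
        break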