Update app.py
app.py
CHANGED
@@ -227,12 +227,16 @@ def calculate_angle(a, b, c):
     a = np.array(a)
     b = np.array(b)
     c = np.array(c)
-    radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])
+    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
     angle = np.abs(radians * 180.0 / np.pi)
     if angle > 180.0:
         angle = 360 - angle
     return angle

+counterL = 0  # rep counter for the left leg
+correct = 0  # squats completed with acceptable knee and hip angles
+incorrect = 0  # squats flagged as too shallow or too deep
+stage = None  # squat phase: 'up' or 'mid'

 # Detection Queue
 result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
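For reference, calculate_angle returns the angle at the middle point b, formed by the rays b->a and b->c and folded into the 0-180 degree range. A standalone sanity check of that formula (the coordinates are illustrative, not taken from app.py):

import numpy as np

def calculate_angle(a, b, c):
    # Same helper as above: angle at b, in degrees, clamped to [0, 180].
    a = np.array(a)
    b = np.array(b)
    c = np.array(c)
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    angle = np.abs(radians * 180.0 / np.pi)
    if angle > 180.0:
        angle = 360 - angle
    return angle

print(calculate_angle((0.0, 0.0), (0.0, 0.5), (0.0, 1.0)))  # collinear points -> 180.0 (straight leg)
print(calculate_angle((1.0, 0.0), (0.0, 0.0), (0.0, 1.0)))  # perpendicular rays -> 90.0

Because MediaPipe landmarks are normalized to [0, 1], a straight hip-knee-ankle line reads close to 180 degrees, and a deep squat pushes the knee angle well below 90.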
@@ -257,39 +261,98 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     ] if landmarks else []

     if landmarks:
+        hipL = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,
                 landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]
+        kneeL = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x,
+                 landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]
+        ankleL = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x,
+                  landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]
+        shoulderL = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,
+                     landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]
+        footIndexL = [landmarks[mp_pose.PoseLandmark.LEFT_FOOT_INDEX.value].x,
+                      landmarks[mp_pose.PoseLandmark.LEFT_FOOT_INDEX.value].y]

         # Calculate angles
+        angleKneeL = calculate_angle(hipL, kneeL, ankleL)
+        angleHipL = calculate_angle(shoulderL, hipL, [hipL[0], 0])
+        angleAnkleL = calculate_angle(footIndexL, ankleL, kneeL)

-        cv2.putText(image, f"Ankle: {int(ankle_angle)}", (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+        # Visualize the left hip angle at the hip landmark
+        cv2.putText(image, str(angleHipL), tuple(np.multiply(hipL, [640, 480]).astype(int)),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)

         # Squat logic
+        if 80 < angleKneeL < 110 and 29 < angleHipL < 40:
             cv2.putText(image, "Squat Detected!", (300, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
         else:
+            if angleHipL < 29:
                 cv2.putText(image, "Lean Forward!", (300, 200), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
+            elif angleHipL > 45:
                 cv2.putText(image, "Lean Backward!", (300, 200), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
+            if angleKneeL < 80:
                 cv2.putText(image, "Squat Too Deep!", (300, 250), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
+            elif angleKneeL > 110:
                 cv2.putText(image, "Lower Your Hips!", (300, 300), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
+
+        # 1. Bend-forward warning
+        if 10 < angleHipL < 18:
+            print(f"AngleHipL when bend forward warning: {angleHipL}")
+            cv2.rectangle(image, (310, 180), (450, 220), (0, 0, 0), -1)
+            cv2.putText(image, "Bend Forward", (320, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (150, 120, 255), 1, cv2.LINE_AA)
+
+        # 2. Lean-backward warning
+        if angleHipL > 45:
+            print(f"AngleHipL when bend backward warning: {angleHipL}")
+            cv2.rectangle(image, (310, 180), (450, 220), (0, 0, 0), -1)
+            cv2.putText(image, "Bend Backward", (320, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 120, 255), 1, cv2.LINE_AA)
+
+        # Incorrect movements
+        # 3. Knees not low enough
+        if 110 < angleKneeL < 130:
+            print(f"AngleKneeL when Lower Your Hips warning: {angleKneeL}")
+            cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
+            cv2.putText(image, "Lower Your Hips", (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+        # 4. Knees not low enough and the squat was not completed
+        if angleKneeL > 130 and stage == 'mid':
+            print(f"AngleKneeL when knees not low enough and squat not completed: {angleKneeL}")
+            cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
+            cv2.putText(image, "Lower Your Hips", (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+            print(f"Incorrect counter, knees not low enough and squat not completed: {incorrect}")
+            incorrect += 1
+            stage = 'up'
+
+        # 5. Squat too deep
+        if angleKneeL < 80 and stage == 'mid':
+            print(f"AngleKneeL when squat too deep warning: {angleKneeL}")
+            cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
+            cv2.putText(image, "Squat too deep", (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+            print(f"Incorrect counter when squat too deep warning: {incorrect}")
+            incorrect += 1
+            stage = 'up'
+
+        # 6. Valid squat completed
+        if 80 < angleKneeL < 110 and stage == 'mid':
+            if 18 < angleHipL < 40:  # valid "down" position
+                print(f"AngleKneeL when valid down position: {angleKneeL}")
+                print(f"AngleHipL when valid down position: {angleHipL}")
+                print(f"Correct counter when valid down position: {correct}")
+                correct += 1
+                stage = 'up'
+        cv2.putText(image, f"Correct: {correct}", (400, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+        cv2.putText(image, f"Incorrect: {incorrect}", (400, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+
+        # Render the counters on the camera frame
+        # Status box
+        cv2.rectangle(image, (0, 0), (500, 80), (245, 117, 16), -1)
+
+        # REP data
+        cv2.putText(image, 'Left', (10, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
+        cv2.putText(image, str(correct), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)
+
+        # Stage data for the left leg
+        cv2.putText(image, 'STAGE', (230, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
+        cv2.putText(image, str(stage), (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

     mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                               mp_drawing.DrawingSpec(color=(255, 175, 0), thickness=2, circle_radius=2),
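Two notes on the counting logic above. The branches that increment correct and incorrect only run while stage == 'mid'; the transition into 'mid' is not shown in this hunk and presumably happens elsewhere in app.py. Also, correct, incorrect and stage are module-level names reassigned inside video_frame_callback, so the callback needs a global declaration (or state kept on an object) for the updates to survive from frame to frame. The knee and hip thresholds can be read off the branches; the helper below is only a sketch that restates them in one place, and classify_squat is not a function from app.py:

# Hypothetical helper, not part of this commit: restates the thresholds used above.
def classify_squat(angle_knee: float, angle_hip: float) -> str:
    # Label a single frame from the left knee and hip angles, in degrees.
    if angle_knee < 80:
        return "squat too deep"
    if angle_knee > 110:
        return "lower your hips"
    if angle_hip < 29:
        return "lean forward"    # torso still too upright for the 29-40 degree window
    if angle_hip > 45:
        return "lean backward"   # torso tilted more than 45 degrees from vertical
    if angle_hip < 40:
        return "squat detected"  # 80 < knee < 110 and 29 < hip < 40
    return "adjust torso"        # a 40-45 degree hip angle triggers neither warning above

print(classify_squat(95, 33))   # -> squat detected
print(classify_squat(120, 33))  # -> lower your hips

Keeping the thresholds in one helper like this would make the 80-110 degree knee window and the 29-40 degree hip window easier to tune without touching the drawing code.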
@@ -299,9 +362,7 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     return av.VideoFrame.from_ndarray(image, format="bgr24")


 # WebRTC streamer configuration
 webrtc_streamer(
     key="squat-detection",
     mode=WebRtcMode.SENDRECV,
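Only the key and mode arguments of the webrtc_streamer call fall inside this hunk. For orientation, a minimal sketch of how the callback is typically wired into streamlit-webrtc is shown below; every argument other than key and mode is an assumption for illustration, not something visible in app.py:

# Sketch only: typical streamlit-webrtc wiring for a per-frame callback.
webrtc_streamer(
    key="squat-detection",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},  # assumed STUN server
    media_stream_constraints={"video": True, "audio": False},  # video only
    video_frame_callback=video_frame_callback,  # the callback modified above
    async_processing=True,
)

With this arrangement each decoded camera frame is handed to video_frame_callback, and the annotated frame it returns is what the browser renders.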