Update app.py
app.py CHANGED
```diff
@@ -221,15 +221,26 @@ def calculate_angle(a, b, c):
     return angle
 
 # Detection Queue
-result_queue:
+result_queue: queue.Queue[List[Detection]] = queue.Queue()
 
 def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     image = frame.to_ndarray(format="bgr24")
     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
     with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
         results = pose.process(image_rgb)
         landmarks = results.pose_landmarks.landmark if results.pose_landmarks else []
 
+        # Corrected detection logic
+        detections = [
+            Detection(
+                class_id=0,  # Assuming a generic class_id for pose detections
+                label="Pose",
+                score=1.0,  # Full confidence as pose landmarks were detected
+                box=np.array([0, 0, image.shape[1], image.shape[0]])  # Full image as bounding box
+            )
+        ] if landmarks else []
+
         if landmarks:
             hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,
                    landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]
@@ -264,14 +275,15 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
                 cv2.putText(image, "Squat Too Deep!", (300, 250), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
             elif knee_angle > 110:
                 cv2.putText(image, "Lower Your Hips!", (300, 300), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
-
+
             mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                       mp_drawing.DrawingSpec(color=(255, 175, 0), thickness=2, circle_radius=2),
                                       mp_drawing.DrawingSpec(color=(0, 255, 200), thickness=2, circle_radius=2))
 
-    result_queue.put(
+    result_queue.put(detections)
     return av.VideoFrame.from_ndarray(image, format="bgr24")
 
+
 # WebRTC streamer configuration
 webrtc_streamer(
     key="squat-detection",
```
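
The annotation on `result_queue` and the `Detection(...)` constructor imply supporting definitions that the diff does not show. Below is a minimal sketch of what they would look like, assuming the app follows the `Detection` NamedTuple pattern from the streamlit-webrtc object-detection demo; the field names match the constructor call above, but the actual declarations in app.py may differ:

```python
# Hypothetical supporting definitions implied by the diff.
import queue
from typing import List, NamedTuple

import numpy as np


class Detection(NamedTuple):
    class_id: int    # numeric class; the diff uses 0 as a generic pose class
    label: str       # human-readable label, e.g. "Pose"
    score: float     # confidence in [0, 1]
    box: np.ndarray  # [x_min, y_min, x_max, y_max] in pixels
```

MediaPipe Pose returns landmarks rather than a bounding box, which is why the commit falls back to the full frame as a placeholder box with a fixed score of 1.0.

The queue itself exists because streamlit-webrtc runs `video_frame_callback` on a worker thread while the Streamlit script runs on its own thread, so a thread-safe `queue.Queue` is the standard way to hand per-frame detections back to the UI. A sketch of the consuming side, assuming the `webrtc_streamer(...)` call at the end of the diff is assigned to a context object (the variable names here are illustrative):

```python
import streamlit as st

# The streamer call from the diff, captured so the script can check
# whether the stream is currently playing.
webrtc_ctx = webrtc_streamer(
    key="squat-detection",
    video_frame_callback=video_frame_callback,
)

if webrtc_ctx.state.playing:
    placeholder = st.empty()
    while True:
        # Blocks until the callback puts the next frame's detections.
        detections = result_queue.get()
        placeholder.table(detections)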