Pratyush101 committed
Commit f33062d · verified · Parent: d8d2273

Update app.py


I am trying to reduce the lag by keeping the Pose object outside of the callback function; if that does not work, I will revert to the original function. (A small sketch of why this helps follows the diff below.)

Files changed (1):
app.py +51 -83
app.py CHANGED
@@ -3,13 +3,11 @@ import queue
 from pathlib import Path
 from typing import List, NamedTuple
 import mediapipe as mp
-
 import av
 import cv2
 import numpy as np
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
-
 from sample_utils.download import download_file
 from sample_utils.turn import get_ice_servers
 
@@ -67,10 +65,12 @@ def calculate_angle(a, b, c):
         angle = 360 - angle
     return angle
 
-
 # Detection Queue
 result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
 
+# Initialize MediaPipe Pose once
+pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
+
 def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     global counterL, correct, incorrect, stage
     if 'stage' not in globals():
@@ -82,87 +82,55 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     h, w = image.shape[:2]
     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
 
-    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
-        results = pose.process(image_rgb)
-        landmarks = results.pose_landmarks.landmark if results.pose_landmarks else []
-
-        # Corrected detection logic
-        detections = [
-            Detection(
-                class_id=0,  # Assuming a generic class_id for pose detections
-                label="Pose",
-                score=0.5,  # Full confidence as pose landmarks were detected
-                box=np.array([0, 0, image.shape[1], image.shape[0]])  # Full image as bounding box
-            ) ] if landmarks else []
-
-
-        if landmarks:
-            hipL = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,
-                    landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]
-            kneeL = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x,
-                     landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]
-            ankleL = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x,
-                      landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]
-            shoulderL = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,
-                         landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]
-
-            # Calculate angles
-            angleKneeL = calculate_angle(hipL, kneeL, ankleL)
-            angleHipL = calculate_angle(shoulderL, hipL, [hipL[0], 0])
-
-            #Visualize of left leg
-            # cv2.putText(image, str(angleHipL), tuple(np.multiply(angleHipL, [w, h]).astype(int)),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
-
-            # cv2.putText(image, str(int(angleHipL)),(0, 200), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
-            # # cv2.putText(image, str(angleKneeL), tuple(np.multiply(angleKneeL, [w, h]).astype(int)),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
-            # cv2.putText(image, str(int(angleKneeL)),(170, 200), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-            # Relative points in terms of w and h
-            rel_point1 = (int(w * 0), int(h - h * 0.55))  # (0, h - 0.2*h)
-            rel_point2 = (int(w * 0.265625), int(h - h * 0.55))  # 170 / 640 = 0.265625
-
-            cv2.rectangle(image, (0,110), (280, 225), (127, 248, 236), -1)
-            cv2.rectangle(image, (0, 113), (277, 222), (12, 85, 61), -1)
-
-            cv2.putText(image, str(int(angleHipL)),(rel_point1), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-            cv2.putText(image, str(int(angleKneeL)),(rel_point2), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-            cv2.putText(image, 'HipL', (10, 140), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
-
-            cv2.putText(image, 'KneeL', (180, 140),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
-
-            if angleKneeL > 110 and stage == 'down':
-                stage = 'up'
-                if 18 < angleHipL < 40:
-                    correct += 1
-
-            if 80 < angleKneeL < 110 and stage == 'up':
-                stage = 'down'
-
-            # REP data
-            # Setup Status box
-            cv2.rectangle(image, (0,0), (280, 103), (127, 248, 236), -1)
-            cv2.rectangle(image, (0, 3), (277, 100), (12, 85, 61), -1)
-
-            cv2.putText(image, 'Left', (10, 22),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-            cv2.putText(image, str(correct),
-                        (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-            # Stage data for left leg
-
-            cv2.putText(image, 'STAGE', (180, 22),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-            cv2.putText(image, stage,
-                        (147, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
-
-        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS, mp_drawing.DrawingSpec(color=(255, 175, 0), thickness=2, circle_radius=2), mp_drawing.DrawingSpec(color=(0, 255, 200), thickness=2, circle_radius=2))
+    results = pose.process(image_rgb)
+    landmarks = results.pose_landmarks.landmark if results.pose_landmarks else []
+
+    detections = [
+        Detection(
+            class_id=0, label="Pose", score=0.5, box=np.array([0, 0, w, h])
+        )
+    ] if landmarks else []
+
+    if landmarks:
+        hipL = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x, landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]
+        kneeL = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x, landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]
+        ankleL = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x, landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]
+        shoulderL = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x, landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]
+
+        angleKneeL = calculate_angle(hipL, kneeL, ankleL)
+        angleHipL = calculate_angle(shoulderL, hipL, [hipL[0], 0])
+
+        rel_point1 = (int(w * 0), int(h - h * 0.55))
+        rel_point2 = (int(w * 0.265625), int(h - h * 0.55))
+
+        cv2.rectangle(image, (0, 110), (280, 225), (127, 248, 236), -1)
+        cv2.rectangle(image, (0, 113), (277, 222), (12, 85, 61), -1)
+        cv2.putText(image, str(int(angleHipL)), rel_point1, cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
+        cv2.putText(image, str(int(angleKneeL)), rel_point2, cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
+
+        if angleKneeL > 110 and stage == 'down':
+            stage = 'up'
+            if 18 < angleHipL < 40:
+                correct += 1
+
+        if 80 < angleKneeL < 110 and stage == 'up':
+            stage = 'down'
+
+        cv2.rectangle(image, (0, 0), (280, 103), (127, 248, 236), -1)
+        cv2.rectangle(image, (0, 3), (277, 100), (12, 85, 61), -1)
+
+        cv2.putText(image, 'Left', (10, 22), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
+        cv2.putText(image, str(correct), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
+        cv2.putText(image, 'STAGE', (180, 22), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
+        cv2.putText(image, stage, (147, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 2, cv2.LINE_AA)
+
+        mp_drawing.draw_landmarks(
+            image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
+            mp_drawing.DrawingSpec(color=(255, 175, 0), thickness=2, circle_radius=2),
+            mp_drawing.DrawingSpec(color=(0, 255, 200), thickness=2, circle_radius=2)
+        )
 
     result_queue.put(detections)
-
     return av.VideoFrame.from_ndarray(image, format="bgr24")
 
 webrtc_streamer(
@@ -172,4 +140,4 @@ webrtc_streamer(
     media_stream_constraints={"video": True, "audio": False},
     video_frame_callback=video_frame_callback,
     async_processing=True,
-    )
+)
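
For context on the change: constructing mp_pose.Pose builds a full MediaPipe processing graph, so doing it inside video_frame_callback pays that setup cost on every frame. Below is a minimal sketch (not part of this commit; the synthetic black frames and the timing loop are illustrative assumptions) contrasting the per-frame pattern with the single module-level instance the commit switches to.

# Sketch: per-frame Pose construction vs. one reused instance.
import time

import cv2
import mediapipe as mp
import numpy as np

mp_pose = mp.solutions.pose
frames = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(10)]

# Original pattern: a new MediaPipe graph is built and torn down per frame.
start = time.perf_counter()
for frame in frames:
    with mp_pose.Pose(min_detection_confidence=0.5,
                      min_tracking_confidence=0.5) as pose:
        pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
per_frame = time.perf_counter() - start

# Commit's pattern: build the graph once and reuse it across frames.
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
start = time.perf_counter()
for frame in frames:
    pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
reused = time.perf_counter() - start
pose.close()

print(f"per-frame construction: {per_frame:.2f}s, reused instance: {reused:.2f}s")

Beyond the setup cost, a reused instance keeps the tracking state that min_tracking_confidence relies on; a with-block per frame discards that state and forces a fresh detection on every frame.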