Spaces:
Sleeping
Sleeping
Updated app with new features
Browse files- __pycache__/utils.cpython-312.pyc +0 -0
- app.py +28 -0
- packages.txt +2 -0
- pages/1_📷️_Live_Stream.py +68 -0
- pages/2_ ⬆️_Upload_Video.py +135 -0
- process_frame.py +554 -0
- requirements.txt +5 -0
- right.png +0 -0
- thresholds.py +55 -0
- utils.py +168 -0
- wrong.png +0 -0
__pycache__/utils.cpython-312.pyc
ADDED
Binary file (7.34 kB). View file
|
|
app.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st


st.title('Anju AI Fitness Trainer')


# Landing page: show a pre-recorded demo clip of the trainer in action.

# Path of the demo gif shipped alongside the app.
gif_url = "output_recorded.gif"

# Render the gif on the page.
st.image(gif_url)
packages.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
ffmpeg
|
2 |
+
python3-opencv
|
pages/1_📷️_Live_Stream.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import av
import os
import sys
import streamlit as st
from streamlit_webrtc import VideoHTMLAttributes, webrtc_streamer
from aiortc.contrib.media import MediaRecorder


BASE_DIR = os.path.abspath(os.path.join(__file__, '../../'))
sys.path.append(BASE_DIR)


from utils import get_mediapipe_pose
from process_frame import ProcessFrame
from thresholds import get_thresholds_beginner, get_thresholds_pro


st.title('Anju AI Fitness Trainer')

mode = st.radio('Select Mode', ['Beginner', 'Pro'], horizontal=True)

# Map the selected difficulty to its threshold factory.
_threshold_getters = {
    'Beginner': get_thresholds_beginner,
    'Pro': get_thresholds_pro,
}
thresholds = _threshold_getters[mode]() if mode in _threshold_getters else None


# Frame processor for the live stream; mirror the output for a selfie view.
live_process_frame = ProcessFrame(thresholds=thresholds, flip_frame=True)
# Initialize the mediapipe pose solution.
pose = get_mediapipe_pose()


if 'download' not in st.session_state:
    st.session_state['download'] = False

output_video_file = 'output_live.flv'


def video_frame_callback(frame: av.VideoFrame):
    """Per-frame WebRTC callback: decode, run pose analysis, re-encode."""
    img = frame.to_ndarray(format="rgb24")           # Decode to an RGB ndarray.
    img, _ = live_process_frame.process(img, pose)   # Overlay squat analysis.
    return av.VideoFrame.from_ndarray(img, format="rgb24")  # Re-encode frame.


def out_recorder_factory() -> MediaRecorder:
    """Build a recorder that writes the stream to disk (currently unused)."""
    return MediaRecorder(output_video_file)


ctx = webrtc_streamer(
    key="Squats-pose-analysis",
    video_frame_callback=video_frame_callback,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    media_stream_constraints={"video": {"width": {'min': 480, 'ideal': 480}}, "audio": False},
    video_html_attrs=VideoHTMLAttributes(autoPlay=True, controls=False, muted=False),
    out_recorder_factory=None,  # recording intentionally disabled
)
pages/2_ ⬆️_Upload_Video.py
ADDED
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import av
import os
import sys
import streamlit as st
import cv2
import tempfile


BASE_DIR = os.path.abspath(os.path.join(__file__, '../../'))
sys.path.append(BASE_DIR)


from utils import get_mediapipe_pose
from process_frame import ProcessFrame
from thresholds import get_thresholds_beginner, get_thresholds_pro


st.title('Anju AI Fitness Trainer')

mode = st.radio('Select Mode', ['Beginner', 'Pro'], horizontal=True)


thresholds = None

if mode == 'Beginner':
    thresholds = get_thresholds_beginner()

elif mode == 'Pro':
    thresholds = get_thresholds_pro()


# Frame processor for the uploaded clip (no mirroring for file playback).
upload_process_frame = ProcessFrame(thresholds=thresholds)

# Initialize the mediapipe pose solution.
pose = get_mediapipe_pose()


download = None

if 'download' not in st.session_state:
    st.session_state['download'] = False


# FIX: plain literal — no interpolation needed (was a pointless f-string).
output_video_file = 'output_recorded.mp4'

if os.path.exists(output_video_file):
    os.remove(output_video_file)


with st.form('Upload', clear_on_submit=True):
    up_file = st.file_uploader("Upload a Video", ['mp4', 'mov', 'avi'])
    uploaded = st.form_submit_button("Upload")

stframe = st.empty()

ip_vid_str = '<p style="font-family:Helvetica; font-weight: bold; font-size: 16px;">Input Video</p>'
warning_str = '<p style="font-family:Helvetica; font-weight: bold; color: Red; font-size: 17px;">Please Upload a Video first!!!</p>'

warn = st.empty()


download_button = st.empty()

if up_file and uploaded:

    download_button.empty()
    # Spool the upload to disk so OpenCV can open it by filename.
    tfile = tempfile.NamedTemporaryFile(delete=False)

    try:
        warn.empty()
        tfile.write(up_file.read())

        vf = cv2.VideoCapture(tfile.name)

        # --------------------- Write the processed video frame. --------------------
        fps = int(vf.get(cv2.CAP_PROP_FPS))
        width = int(vf.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vf.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_size = (width, height)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_output = cv2.VideoWriter(output_video_file, fourcc, fps, frame_size)
        # -----------------------------------------------------------------------------

        txt = st.sidebar.markdown(ip_vid_str, unsafe_allow_html=True)
        ip_video = st.sidebar.video(tfile.name)

        while vf.isOpened():
            ret, frame = vf.read()
            if not ret:
                break

            # Convert frame from BGR to RGB before processing it.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            out_frame, _ = upload_process_frame.process(frame, pose)
            stframe.image(out_frame)
            # VideoWriter expects BGR, so reverse the channel order back.
            video_output.write(out_frame[..., ::-1])

        vf.release()
        video_output.release()
        stframe.empty()
        ip_video.empty()
        txt.empty()
        tfile.close()
        # FIX: remove the temporary upload copy — the NamedTemporaryFile was
        # created with delete=False and was previously never cleaned up,
        # leaking one temp file per processed upload.
        os.remove(tfile.name)

    except AttributeError:
        # Raised when no file was actually supplied; show a prompt instead.
        warn.markdown(warning_str, unsafe_allow_html=True)


if os.path.exists(output_video_file):
    with open(output_video_file, 'rb') as op_vid:
        download = download_button.download_button('Download Video', data=op_vid, file_name='output_recorded.mp4')

    if download:
        st.session_state['download'] = True


if os.path.exists(output_video_file) and st.session_state['download']:
    os.remove(output_video_file)
    st.session_state['download'] = False
    download_button.empty()
process_frame.py
ADDED
@@ -0,0 +1,554 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
import cv2
|
3 |
+
import numpy as np
|
4 |
+
from utils import find_angle, get_landmark_features, draw_text
|
5 |
+
|
6 |
+
|
7 |
+
class ProcessFrame:
    """Per-frame squat-form analyser.

    Given a video frame and a mediapipe pose object, overlays pose landmarks,
    joint angles and textual feedback, drives a three-state squat state
    machine (s1 = standing, s2 = transition, s3 = bottom of squat), and
    maintains correct/incorrect rep counters.

    Parameters
    ----------
    thresholds : dict
        Angle and timing thresholds (see ``thresholds.py``).
    flip_frame : bool, optional
        Horizontally mirror the output frame (used for the live selfie view).
    """

    def __init__(self, thresholds, flip_frame=False):

        # Set if frame should be flipped or not.
        self.flip_frame = flip_frame

        self.thresholds = thresholds

        # Font type.
        self.font = cv2.FONT_HERSHEY_SIMPLEX

        # line type
        self.linetype = cv2.LINE_AA

        # set radius to draw arc
        self.radius = 20

        # Color palette. NOTE(review): the original comment said BGR, but the
        # values read as (R, G, B) and callers feed RGB frames — confirm.
        self.COLORS = {
            'blue'       : (0, 127, 255),
            'red'        : (255, 50, 50),
            'green'      : (0, 255, 127),
            'light_green': (100, 233, 127),
            'yellow'     : (255, 255, 0),
            'magenta'    : (255, 0, 255),
            'white'      : (255, 255, 255),
            'cyan'       : (0, 255, 255),
            'light_blue' : (102, 204, 255)
        }

        # Mediapipe pose landmark indices for the features we track.
        self.dict_features = {}
        self.left_features = {
            'shoulder': 11,
            'elbow'   : 13,
            'wrist'   : 15,
            'hip'     : 23,
            'knee'    : 25,
            'ankle'   : 27,
            'foot'    : 31
        }

        self.right_features = {
            'shoulder': 12,
            'elbow'   : 14,
            'wrist'   : 16,
            'hip'     : 24,
            'knee'    : 26,
            'ankle'   : 28,
            'foot'    : 32
        }

        self.dict_features['left'] = self.left_features
        self.dict_features['right'] = self.right_features
        self.dict_features['nose'] = 0

        # For tracking counters and sharing states in and out of callbacks.
        self.state_tracker = {
            'state_seq': [],

            'start_inactive_time': time.perf_counter(),
            'start_inactive_time_front': time.perf_counter(),
            'INACTIVE_TIME': 0.0,
            'INACTIVE_TIME_FRONT': 0.0,

            # 0 --> Bend Backwards, 1 --> Bend Forward,
            # 2 --> Keep shin straight, 3 --> Deep squat
            'DISPLAY_TEXT': np.full((4,), False),
            'COUNT_FRAMES': np.zeros((4,), dtype=np.int64),

            'LOWER_HIPS': False,

            'INCORRECT_POSTURE': False,

            'prev_state': None,
            'curr_state': None,

            'SQUAT_COUNT': 0,
            'IMPROPER_SQUAT': 0

        }

        # feedback id --> (message, y position, background color)
        self.FEEDBACK_ID_MAP = {
            0: ('BEND BACKWARDS', 215, (0, 153, 255)),
            1: ('BEND FORWARD', 215, (0, 153, 255)),
            2: ('KNEE FALLING OVER TOE', 170, (255, 80, 80)),
            3: ('SQUAT TOO DEEP', 125, (255, 80, 80))
        }

    def _get_state(self, knee_angle):
        """Map a hip-knee-vertical angle to a state label.

        Returns 's1' (standing), 's2' (transition), 's3' (pass/bottom) or
        None when the angle falls between the configured bands.
        """
        knee = None

        if self.thresholds['HIP_KNEE_VERT']['NORMAL'][0] <= knee_angle <= self.thresholds['HIP_KNEE_VERT']['NORMAL'][1]:
            knee = 1
        elif self.thresholds['HIP_KNEE_VERT']['TRANS'][0] <= knee_angle <= self.thresholds['HIP_KNEE_VERT']['TRANS'][1]:
            knee = 2
        elif self.thresholds['HIP_KNEE_VERT']['PASS'][0] <= knee_angle <= self.thresholds['HIP_KNEE_VERT']['PASS'][1]:
            knee = 3

        return f's{knee}' if knee else None

    def _update_state_sequence(self, state):
        """Append *state* to the rep's state sequence.

        A full correct rep builds the sequence ['s2', 's3', 's2']: one 's2'
        on the way down, one 's3' at the bottom, one 's2' on the way up.
        Duplicates outside that pattern are ignored.
        """
        if state == 's2':
            if (('s3' not in self.state_tracker['state_seq']) and (self.state_tracker['state_seq'].count('s2')) == 0) or \
                    (('s3' in self.state_tracker['state_seq']) and (self.state_tracker['state_seq'].count('s2') == 1)):
                self.state_tracker['state_seq'].append(state)

        elif state == 's3':
            if (state not in self.state_tracker['state_seq']) and 's2' in self.state_tracker['state_seq']:
                self.state_tracker['state_seq'].append(state)

    def _show_feedback(self, frame, c_frame, dict_maps, lower_hips_disp):
        """Draw the currently-active feedback banners onto *frame*.

        c_frame is the COUNT_FRAMES array; any index with a non-zero count
        has its message from *dict_maps* rendered. *lower_hips_disp* adds the
        'LOWER YOUR HIPS' banner.
        """
        if lower_hips_disp:
            draw_text(
                frame,
                'LOWER YOUR HIPS',
                pos=(30, 80),
                text_color=(0, 0, 0),
                font_scale=0.6,
                text_color_bg=(255, 255, 0)
            )

        for idx in np.where(c_frame)[0]:
            draw_text(
                frame,
                dict_maps[idx][0],
                pos=(30, dict_maps[idx][1]),
                text_color=(255, 255, 230),
                font_scale=0.6,
                text_color_bg=dict_maps[idx][2]
            )

        return frame

    def process(self, frame: np.ndarray, pose):
        """Analyse one frame.

        Returns the annotated frame and an optional sound cue string
        (rep count, 'incorrect', 'reset_counters', or None).
        """
        play_sound = None

        frame_height, frame_width, _ = frame.shape

        # Run mediapipe pose on the frame.
        keypoints = pose.process(frame)

        if keypoints.pose_landmarks:
            ps_lm = keypoints.pose_landmarks

            nose_coord = get_landmark_features(ps_lm.landmark, self.dict_features, 'nose', frame_width, frame_height)
            left_shldr_coord, left_elbow_coord, left_wrist_coord, left_hip_coord, left_knee_coord, left_ankle_coord, left_foot_coord = \
                get_landmark_features(ps_lm.landmark, self.dict_features, 'left', frame_width, frame_height)
            right_shldr_coord, right_elbow_coord, right_wrist_coord, right_hip_coord, right_knee_coord, right_ankle_coord, right_foot_coord = \
                get_landmark_features(ps_lm.landmark, self.dict_features, 'right', frame_width, frame_height)

            offset_angle = find_angle(left_shldr_coord, right_shldr_coord, nose_coord)

            # Camera facing the user head-on: pause analysis and show alignment help.
            if offset_angle > self.thresholds['OFFSET_THRESH']:

                display_inactivity = False

                end_time = time.perf_counter()
                self.state_tracker['INACTIVE_TIME_FRONT'] += end_time - self.state_tracker['start_inactive_time_front']
                self.state_tracker['start_inactive_time_front'] = end_time

                if self.state_tracker['INACTIVE_TIME_FRONT'] >= self.thresholds['INACTIVE_THRESH']:
                    self.state_tracker['SQUAT_COUNT'] = 0
                    self.state_tracker['IMPROPER_SQUAT'] = 0
                    display_inactivity = True

                cv2.circle(frame, nose_coord, 7, self.COLORS['white'], -1)
                cv2.circle(frame, left_shldr_coord, 7, self.COLORS['yellow'], -1)
                cv2.circle(frame, right_shldr_coord, 7, self.COLORS['magenta'], -1)

                if self.flip_frame:
                    frame = cv2.flip(frame, 1)

                if display_inactivity:
                    play_sound = 'reset_counters'
                    self.state_tracker['INACTIVE_TIME_FRONT'] = 0.0
                    self.state_tracker['start_inactive_time_front'] = time.perf_counter()

                draw_text(
                    frame,
                    "CORRECT: " + str(self.state_tracker['SQUAT_COUNT']),
                    pos=(int(frame_width * 0.75), 30),
                    text_color=(255, 255, 230),
                    font_scale=0.7,
                    text_color_bg=(18, 185, 0)
                )

                draw_text(
                    frame,
                    "INCORRECT: " + str(self.state_tracker['IMPROPER_SQUAT']),
                    pos=(int(frame_width * 0.75), 80),
                    text_color=(255, 255, 230),
                    font_scale=0.7,
                    text_color_bg=(221, 0, 0),

                )

                draw_text(
                    frame,
                    'CAMERA NOT ALIGNED PROPERLY!!!',
                    pos=(30, frame_height - 60),
                    text_color=(255, 255, 230),
                    font_scale=0.65,
                    text_color_bg=(255, 153, 0),
                )

                draw_text(
                    frame,
                    'OFFSET ANGLE: ' + str(offset_angle),
                    pos=(30, frame_height - 30),
                    text_color=(255, 255, 230),
                    font_scale=0.65,
                    text_color_bg=(255, 153, 0),
                )

                # Reset inactive times for side view.
                self.state_tracker['start_inactive_time'] = time.perf_counter()
                self.state_tracker['INACTIVE_TIME'] = 0.0
                self.state_tracker['prev_state'] = None
                self.state_tracker['curr_state'] = None

            # Camera is aligned properly.
            else:

                self.state_tracker['INACTIVE_TIME_FRONT'] = 0.0
                self.state_tracker['start_inactive_time_front'] = time.perf_counter()

                # Pick the side most visible to the camera (larger vertical
                # shoulder-to-foot extent).
                dist_l_sh_hip = abs(left_foot_coord[1] - left_shldr_coord[1])
                # FIX: bracket was misplaced — original read
                # abs(right_foot_coord[1] - right_shldr_coord)[1], which only
                # yields the intended value when coords are numpy arrays and
                # raises TypeError for plain tuples.
                dist_r_sh_hip = abs(right_foot_coord[1] - right_shldr_coord[1])

                shldr_coord = None
                elbow_coord = None
                wrist_coord = None
                hip_coord = None
                knee_coord = None
                ankle_coord = None
                foot_coord = None

                if dist_l_sh_hip > dist_r_sh_hip:
                    shldr_coord = left_shldr_coord
                    elbow_coord = left_elbow_coord
                    wrist_coord = left_wrist_coord
                    hip_coord = left_hip_coord
                    knee_coord = left_knee_coord
                    ankle_coord = left_ankle_coord
                    foot_coord = left_foot_coord

                    # Arc-sweep direction for the left side.
                    multiplier = -1

                else:
                    shldr_coord = right_shldr_coord
                    elbow_coord = right_elbow_coord
                    wrist_coord = right_wrist_coord
                    hip_coord = right_hip_coord
                    knee_coord = right_knee_coord
                    ankle_coord = right_ankle_coord
                    foot_coord = right_foot_coord

                    multiplier = 1

                # ------------------- Vertical Angle calculation --------------

                hip_vertical_angle = find_angle(shldr_coord, np.array([hip_coord[0], 0]), hip_coord)
                cv2.ellipse(frame, hip_coord, (30, 30),
                            angle=0, startAngle=-90, endAngle=-90 + multiplier * hip_vertical_angle,
                            color=self.COLORS['white'], thickness=3, lineType=self.linetype)

                cv2.line(frame, (hip_coord[0], hip_coord[1] + 20), (hip_coord[0], hip_coord[1] - 80), self.COLORS['blue'], 4, lineType=self.linetype)

                knee_vertical_angle = find_angle(hip_coord, np.array([knee_coord[0], 0]), knee_coord)
                cv2.ellipse(frame, knee_coord, (20, 20),
                            angle=0, startAngle=-90, endAngle=-90 - multiplier * knee_vertical_angle,
                            color=self.COLORS['white'], thickness=3, lineType=self.linetype)

                cv2.line(frame, (knee_coord[0], knee_coord[1] + 20), (knee_coord[0], knee_coord[1] - 50), self.COLORS['blue'], 4, lineType=self.linetype)

                ankle_vertical_angle = find_angle(knee_coord, np.array([ankle_coord[0], 0]), ankle_coord)
                cv2.ellipse(frame, ankle_coord, (30, 30),
                            angle=0, startAngle=-90, endAngle=-90 + multiplier * ankle_vertical_angle,
                            color=self.COLORS['white'], thickness=3, lineType=self.linetype)

                cv2.line(frame, (ankle_coord[0], ankle_coord[1] + 20), (ankle_coord[0], ankle_coord[1] - 50), self.COLORS['blue'], 4, lineType=self.linetype)

                # ------------------------------------------------------------

                # Join landmarks.
                cv2.line(frame, shldr_coord, elbow_coord, self.COLORS['light_blue'], 4, lineType=self.linetype)
                cv2.line(frame, wrist_coord, elbow_coord, self.COLORS['light_blue'], 4, lineType=self.linetype)
                cv2.line(frame, shldr_coord, hip_coord, self.COLORS['light_blue'], 4, lineType=self.linetype)
                cv2.line(frame, knee_coord, hip_coord, self.COLORS['light_blue'], 4, lineType=self.linetype)
                cv2.line(frame, ankle_coord, knee_coord, self.COLORS['light_blue'], 4, lineType=self.linetype)
                cv2.line(frame, ankle_coord, foot_coord, self.COLORS['light_blue'], 4, lineType=self.linetype)

                # Plot landmark points
                cv2.circle(frame, shldr_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)
                cv2.circle(frame, elbow_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)
                cv2.circle(frame, wrist_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)
                cv2.circle(frame, hip_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)
                cv2.circle(frame, knee_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)
                cv2.circle(frame, ankle_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)
                cv2.circle(frame, foot_coord, 7, self.COLORS['yellow'], -1, lineType=self.linetype)

                current_state = self._get_state(int(knee_vertical_angle))
                self.state_tracker['curr_state'] = current_state
                self._update_state_sequence(current_state)

                # -------------------------------------- COMPUTE COUNTERS --------------------------------------

                if current_state == 's1':

                    # Back to standing: a full ['s2','s3','s2'] sequence with
                    # no posture violations counts as a correct rep.
                    if len(self.state_tracker['state_seq']) == 3 and not self.state_tracker['INCORRECT_POSTURE']:
                        self.state_tracker['SQUAT_COUNT'] += 1
                        play_sound = str(self.state_tracker['SQUAT_COUNT'])

                    elif 's2' in self.state_tracker['state_seq'] and len(self.state_tracker['state_seq']) == 1:
                        self.state_tracker['IMPROPER_SQUAT'] += 1
                        play_sound = 'incorrect'

                    elif self.state_tracker['INCORRECT_POSTURE']:
                        self.state_tracker['IMPROPER_SQUAT'] += 1
                        play_sound = 'incorrect'

                    self.state_tracker['state_seq'] = []
                    self.state_tracker['INCORRECT_POSTURE'] = False

                # ----------------------------------------------------------------------------------------------------

                # -------------------------------------- PERFORM FEEDBACK ACTIONS --------------------------------------

                else:
                    if hip_vertical_angle > self.thresholds['HIP_THRESH'][1]:
                        self.state_tracker['DISPLAY_TEXT'][0] = True

                    elif hip_vertical_angle < self.thresholds['HIP_THRESH'][0] and \
                            self.state_tracker['state_seq'].count('s2') == 1:
                        self.state_tracker['DISPLAY_TEXT'][1] = True

                    if self.thresholds['KNEE_THRESH'][0] < knee_vertical_angle < self.thresholds['KNEE_THRESH'][1] and \
                            self.state_tracker['state_seq'].count('s2') == 1:
                        self.state_tracker['LOWER_HIPS'] = True

                    elif knee_vertical_angle > self.thresholds['KNEE_THRESH'][2]:
                        self.state_tracker['DISPLAY_TEXT'][3] = True
                        self.state_tracker['INCORRECT_POSTURE'] = True

                    if (ankle_vertical_angle > self.thresholds['ANKLE_THRESH']):
                        self.state_tracker['DISPLAY_TEXT'][2] = True
                        self.state_tracker['INCORRECT_POSTURE'] = True

                # ----------------------------------------------------------------------------------------------------

                # ----------------------------------- COMPUTE INACTIVITY ---------------------------------------------

                display_inactivity = False

                if self.state_tracker['curr_state'] == self.state_tracker['prev_state']:

                    end_time = time.perf_counter()
                    self.state_tracker['INACTIVE_TIME'] += end_time - self.state_tracker['start_inactive_time']
                    self.state_tracker['start_inactive_time'] = end_time

                    if self.state_tracker['INACTIVE_TIME'] >= self.thresholds['INACTIVE_THRESH']:
                        self.state_tracker['SQUAT_COUNT'] = 0
                        self.state_tracker['IMPROPER_SQUAT'] = 0
                        display_inactivity = True

                else:

                    self.state_tracker['start_inactive_time'] = time.perf_counter()
                    self.state_tracker['INACTIVE_TIME'] = 0.0

                # -------------------------------------------------------------------------------------------------------

                hip_text_coord_x = hip_coord[0] + 10
                knee_text_coord_x = knee_coord[0] + 15
                ankle_text_coord_x = ankle_coord[0] + 10

                if self.flip_frame:
                    frame = cv2.flip(frame, 1)
                    # Mirror the text x-coordinates to match the flipped frame.
                    hip_text_coord_x = frame_width - hip_coord[0] + 10
                    knee_text_coord_x = frame_width - knee_coord[0] + 15
                    ankle_text_coord_x = frame_width - ankle_coord[0] + 10

                if 's3' in self.state_tracker['state_seq']:
                    self.state_tracker['LOWER_HIPS'] = False

                self.state_tracker['COUNT_FRAMES'][self.state_tracker['DISPLAY_TEXT']] += 1

                frame = self._show_feedback(frame, self.state_tracker['COUNT_FRAMES'], self.FEEDBACK_ID_MAP, self.state_tracker['LOWER_HIPS'])

                if display_inactivity:
                    play_sound = 'reset_counters'
                    self.state_tracker['start_inactive_time'] = time.perf_counter()
                    self.state_tracker['INACTIVE_TIME'] = 0.0

                cv2.putText(frame, str(int(hip_vertical_angle)), (hip_text_coord_x, hip_coord[1]), self.font, 0.6, self.COLORS['light_green'], 2, lineType=self.linetype)
                cv2.putText(frame, str(int(knee_vertical_angle)), (knee_text_coord_x, knee_coord[1] + 10), self.font, 0.6, self.COLORS['light_green'], 2, lineType=self.linetype)
                cv2.putText(frame, str(int(ankle_vertical_angle)), (ankle_text_coord_x, ankle_coord[1]), self.font, 0.6, self.COLORS['light_green'], 2, lineType=self.linetype)

                draw_text(
                    frame,
                    "CORRECT: " + str(self.state_tracker['SQUAT_COUNT']),
                    pos=(int(frame_width * 0.75), 30),
                    text_color=(255, 255, 230),
                    font_scale=0.7,
                    text_color_bg=(18, 185, 0)
                )

                draw_text(
                    frame,
                    "INCORRECT: " + str(self.state_tracker['IMPROPER_SQUAT']),
                    pos=(int(frame_width * 0.75), 80),
                    text_color=(255, 255, 230),
                    font_scale=0.7,
                    text_color_bg=(221, 0, 0),

                )

                # Stop displaying a feedback message once it has been shown
                # for CNT_FRAME_THRESH frames.
                self.state_tracker['DISPLAY_TEXT'][self.state_tracker['COUNT_FRAMES'] > self.thresholds['CNT_FRAME_THRESH']] = False
                self.state_tracker['COUNT_FRAMES'][self.state_tracker['COUNT_FRAMES'] > self.thresholds['CNT_FRAME_THRESH']] = 0
                self.state_tracker['prev_state'] = current_state

        else:
            # No landmarks detected: accumulate inactivity and reset state.

            if self.flip_frame:
                frame = cv2.flip(frame, 1)

            end_time = time.perf_counter()
            self.state_tracker['INACTIVE_TIME'] += end_time - self.state_tracker['start_inactive_time']

            display_inactivity = False

            if self.state_tracker['INACTIVE_TIME'] >= self.thresholds['INACTIVE_THRESH']:
                self.state_tracker['SQUAT_COUNT'] = 0
                self.state_tracker['IMPROPER_SQUAT'] = 0
                display_inactivity = True

            self.state_tracker['start_inactive_time'] = end_time

            draw_text(
                frame,
                "CORRECT: " + str(self.state_tracker['SQUAT_COUNT']),
                pos=(int(frame_width * 0.75), 30),
                text_color=(255, 255, 230),
                font_scale=0.7,
                text_color_bg=(18, 185, 0)
            )

            draw_text(
                frame,
                "INCORRECT: " + str(self.state_tracker['IMPROPER_SQUAT']),
                pos=(int(frame_width * 0.75), 80),
                text_color=(255, 255, 230),
                font_scale=0.7,
                text_color_bg=(221, 0, 0),

            )

            if display_inactivity:
                play_sound = 'reset_counters'
                self.state_tracker['start_inactive_time'] = time.perf_counter()
                self.state_tracker['INACTIVE_TIME'] = 0.0

            # Reset all other state variables.
            # FIX: these arrays are declared with shape (4,) in __init__ but
            # were reset here with shape (5,), silently growing the feedback
            # state arrays after any frame with no detected pose.
            self.state_tracker['prev_state'] = None
            self.state_tracker['curr_state'] = None
            self.state_tracker['INACTIVE_TIME_FRONT'] = 0.0
            self.state_tracker['INCORRECT_POSTURE'] = False
            self.state_tracker['DISPLAY_TEXT'] = np.full((4,), False)
            self.state_tracker['COUNT_FRAMES'] = np.zeros((4,), dtype=np.int64)
            self.state_tracker['start_inactive_time_front'] = time.perf_counter()

        return frame, play_sound
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
numpy
|
2 |
+
opencv-python-headless
|
3 |
+
mediapipe
|
4 |
+
altair<5
|
5 |
+
streamlit_webrtc
|
right.png
ADDED
thresholds.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
|
3 |
+
# Get thresholds for beginner mode
def get_thresholds_beginner():
    """Return the squat-analysis threshold configuration for beginner mode.

    The returned dict is consumed by the frame processor; 'HIP_KNEE_VERT'
    holds the hip-knee-vertical angle bands (degrees) that drive the squat
    state machine, and 'INACTIVE_THRESH' is compared against accumulated
    inactive time before counters are reset.
    """
    # Angle bands (degrees): standing, transitioning, full squat.
    angle_bands = {
        'NORMAL': (0, 30),
        'TRANS': (35, 65),
        'PASS': (70, 95),
    }

    return {
        'HIP_KNEE_VERT': angle_bands,

        'HIP_THRESH': [10, 60],
        'ANKLE_THRESH': 45,
        'KNEE_THRESH': [50, 70, 95],

        'OFFSET_THRESH': 50.0,
        'INACTIVE_THRESH': 15.0,

        'CNT_FRAME_THRESH': 50,
    }
|
28 |
+
|
29 |
+
|
30 |
+
|
31 |
+
# Get thresholds for pro mode
# (fixed: comment was copy-pasted from the beginner function and wrongly
# said "beginner mode")
def get_thresholds_pro():
    """Return the squat-analysis threshold configuration for pro mode.

    Stricter than the beginner profile: the 'PASS' band starts at a deeper
    80 degrees (vs 70), and the hip/ankle tolerances are tighter.
    'INACTIVE_THRESH' is compared against accumulated inactive time before
    counters are reset.
    """
    # Angle bands (degrees): standing, transitioning, full squat.
    _ANGLE_HIP_KNEE_VERT = {
        'NORMAL': (0, 30),
        'TRANS': (35, 65),
        'PASS': (80, 95),
    }

    thresholds = {
        'HIP_KNEE_VERT': _ANGLE_HIP_KNEE_VERT,

        'HIP_THRESH': [15, 50],
        'ANKLE_THRESH': 30,
        'KNEE_THRESH': [50, 80, 95],

        'OFFSET_THRESH': 50.0,
        'INACTIVE_THRESH': 15.0,

        'CNT_FRAME_THRESH': 50,
    }

    return thresholds
|
utils.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import mediapipe as mp
|
3 |
+
import numpy as np
|
4 |
+
|
5 |
+
# Status icons pasted into the text banner by draw_text() when
# overlay_image=True; loaded once at import time and converted BGR->RGB.
# NOTE(review): cv2.imread returns None when the file is missing, which
# would make cvtColor raise — assumes right.png / wrong.png sit in the
# process working directory; confirm against the deployment layout.
correct = cv2.imread('right.png')
correct = cv2.cvtColor(correct, cv2.COLOR_BGR2RGB)
incorrect = cv2.imread('wrong.png')
incorrect = cv2.cvtColor(incorrect, cv2.COLOR_BGR2RGB)
|
9 |
+
|
10 |
+
def draw_rounded_rect(img, rect_start, rect_end, corner_width, box_color):
    """Draw a filled rectangle with rounded corners on ``img`` in place.

    ``rect_start``/``rect_end`` are the (x, y) corners of the bounding box,
    ``corner_width`` the corner radius.  Returns the (mutated) image.
    """
    x1, y1 = rect_start
    x2, y2 = rect_end
    r = corner_width

    # Cover the interior with five axis-aligned strips: top, bottom,
    # left, right, and the centre.
    strips = (
        ((x1 + r, y1), (x2 - r, y1 + r)),
        ((x1 + r, y2 - r), (x2 - r, y2)),
        ((x1, y1 + r), (x1 + r, y2 - r)),
        ((x2 - r, y1 + r), (x2, y2 - r)),
        ((x1 + r, y1 + r), (x2 - r, y2 - r)),
    )
    for top_left, bottom_right in strips:
        cv2.rectangle(img, top_left, bottom_right, box_color, -1)

    # Round each corner with a filled quarter-ellipse.
    corners = (
        ((x1 + r, y1 + r), -90, -180),
        ((x2 - r, y1 + r), 0, -90),
        ((x1 + r, y2 - r), 90, 180),
        ((x2 - r, y2 - r), 0, 90),
    )
    for centre, start_deg, end_deg in corners:
        cv2.ellipse(img, centre, (r, r),
                    angle=0, startAngle=start_deg, endAngle=end_deg,
                    color=box_color, thickness=-1)

    return img
|
38 |
+
|
39 |
+
|
40 |
+
|
41 |
+
|
42 |
+
def draw_dotted_line(frame, lm_coord, start, end, line_color):
    """Draw a vertical dotted line on ``frame`` at x = ``lm_coord[0]``.

    Radius-2 filled circles are placed every 8 pixels along the y-axis
    from ``start`` to ``end`` (inclusive).  Mutates ``frame`` in place
    and returns it.

    Fixed: removed the dead local ``pix_step`` — it was initialised to 0,
    never updated, and added to every y-coordinate as a no-op.
    """
    x = lm_coord[0]
    for y in range(start, end + 1, 8):
        cv2.circle(frame, (x, y), 2, line_color, -1, lineType=cv2.LINE_AA)

    return frame
|
49 |
+
|
50 |
+
def draw_text(
    img,
    msg,
    width = 7,
    font=cv2.FONT_HERSHEY_SIMPLEX,
    pos=(0, 0),
    font_scale=1,
    font_thickness=2,
    text_color=(0, 255, 0),
    text_color_bg=(0, 0, 0),
    box_offset=(20, 10),
    overlay_image = False,
    overlay_type = None
):
    """Draw ``msg`` inside a rounded-rectangle banner on ``img`` in place.

    When ``overlay_image`` is True, the banner is widened and one of the
    module-level status icons (``correct`` / ``incorrect``) is pasted to
    the left of the text, selected by ``overlay_type``.

    Args:
        img: image (numpy array) drawn on in place.
        msg: text string to render.
        width: corner radius forwarded to draw_rounded_rect.
        font, font_scale, font_thickness, text_color: cv2.putText styling.
        pos: (x, y) anchor of the text.
        text_color_bg: fill colour of the banner.
        box_offset: (x, y) padding subtracted from ``pos`` for the banner
            top-left corner.
        overlay_image: whether to paste a status icon into the banner.
        overlay_type: "correct" or "incorrect" — selects the icon.

    Returns:
        (text_w, text_h) as reported by cv2.getTextSize.

    NOTE(review): if ``overlay_image`` is True but ``overlay_type`` is
    neither "correct" nor "incorrect", ``overlay_res`` is never assigned
    and the paste line raises NameError — callers must pass a valid
    ``overlay_type`` whenever ``overlay_image`` is set.
    """
    offset = box_offset
    x, y = pos
    text_size, _ = cv2.getTextSize(msg, font, font_scale, font_thickness)
    text_w, text_h = text_size

    # Banner bounds: expand around the text by ``offset``; the (25, 0)
    # term trims 25 px off the computed right edge.
    rec_start = tuple(p - o for p, o in zip(pos, offset))
    rec_end = tuple(m + n - o for m, n, o in zip((x + text_w, y + text_h), offset, (25, 0)))

    resize_height = 0

    if overlay_image:
        # Icon is square with side equal to the banner height; the banner
        # is widened by the same amount to make room for it.
        resize_height = rec_end[1] - rec_start[1]
        # print("Height: ", resize_height)
        # print("Width: ", rec_end[0] - rec_start[0])
        img = draw_rounded_rect(img, rec_start, (rec_end[0]+resize_height, rec_end[1]), width, text_color_bg)
        if overlay_type == "correct":
            overlay_res = cv2.resize(correct, (resize_height, resize_height), interpolation = cv2.INTER_AREA)
        elif overlay_type == "incorrect":
            overlay_res = cv2.resize(incorrect, (resize_height, resize_height), interpolation = cv2.INTER_AREA)

        # Paste the icon into the banner, just right of the rounded corner.
        img[rec_start[1]:rec_start[1]+resize_height, rec_start[0]+width:rec_start[0]+width+resize_height] = overlay_res

    else:
        img = draw_rounded_rect(img, rec_start, rec_end, width, text_color_bg)


    # Text is drawn to the right of any icon (resize_height is 0 when
    # there is no overlay).
    cv2.putText(
        img,
        msg,
        (int(rec_start[0]+resize_height + 8), int(y + text_h + font_scale - 1)),
        font,
        font_scale,
        text_color,
        font_thickness,
        cv2.LINE_AA,
    )

    return text_size
|
105 |
+
|
106 |
+
|
107 |
+
|
108 |
+
def find_angle(p1, p2, ref_pt = np.array([0,0])):
    """Return the angle, in whole degrees, between vectors ref_pt->p1 and
    ref_pt->p2.

    Args:
        p1, p2: 2-D points as numpy arrays.
        ref_pt: vertex of the angle (default: the origin).

    Returns:
        int angle in [0, 180].
    """
    p1_ref = p1 - ref_pt
    p2_ref = p2 - ref_pt

    # Clip guards against tiny floating-point excursions outside [-1, 1].
    cos_theta = np.dot(p1_ref, p2_ref) / (np.linalg.norm(p1_ref) * np.linalg.norm(p2_ref))
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))

    # BUG FIX: the original computed int(180 / np.pi) * theta, which
    # truncates the conversion factor 57.2958... to 57 and skews every
    # angle low by ~0.5% (a true 90 deg came back as 89).  Use the exact
    # radians->degrees conversion instead.
    return int(np.degrees(theta))
|
118 |
+
|
119 |
+
|
120 |
+
|
121 |
+
|
122 |
+
|
123 |
+
def get_landmark_array(pose_landmark, key, frame_width, frame_height):
    """De-normalize one landmark to integer pixel coordinates.

    ``pose_landmark[key]`` must expose normalized ``.x`` / ``.y`` fields;
    they are scaled by the frame dimensions and truncated to ints.

    Returns:
        np.array([x_px, y_px])
    """
    landmark = pose_landmark[key]
    x_px = int(landmark.x * frame_width)
    y_px = int(landmark.y * frame_height)
    return np.array([x_px, y_px])
|
129 |
+
|
130 |
+
|
131 |
+
|
132 |
+
|
133 |
+
def get_landmark_features(kp_results, dict_features, feature, frame_width, frame_height):
    """Fetch pixel coordinates for the requested landmark group.

    Args:
        kp_results: MediaPipe pose landmarks, indexable by landmark id.
        dict_features: mapping with a 'nose' id plus 'left'/'right'
            sub-dicts keyed by 'shoulder', 'elbow', 'wrist', 'hip',
            'knee', 'ankle', 'foot'.
        feature: 'nose', 'left', or 'right'.
        frame_width, frame_height: frame size used to de-normalize.

    Returns:
        For 'nose': a single np.array([x, y]).
        For 'left'/'right': a 7-tuple (shoulder, elbow, wrist, hip,
        knee, ankle, foot) of np.array coordinates.

    Raises:
        ValueError: for any other ``feature``.
    """
    if feature == 'nose':
        return get_landmark_array(kp_results, dict_features[feature], frame_width, frame_height)

    # BUG FIX: the original condition was `feature == 'left' or 'right'`,
    # which is always truthy ('right' is a non-empty string), so the
    # ValueError branch was unreachable and invalid features crashed with
    # a KeyError instead.  (Also fixed the missing closing quote in the
    # error message.)
    if feature in ('left', 'right'):
        side = dict_features[feature]
        return tuple(
            get_landmark_array(kp_results, side[part], frame_width, frame_height)
            for part in ('shoulder', 'elbow', 'wrist', 'hip', 'knee', 'ankle', 'foot')
        )

    raise ValueError("feature needs to be either 'nose', 'left' or 'right'")
|
151 |
+
|
152 |
+
|
153 |
+
def get_mediapipe_pose(
    static_image_mode = False,
    model_complexity = 1,
    smooth_landmarks = True,
    min_detection_confidence = 0.5,
    min_tracking_confidence = 0.5

):
    """Build and return a MediaPipe ``Pose`` estimator.

    All arguments are forwarded unchanged to
    ``mp.solutions.pose.Pose``; the defaults match MediaPipe's
    video-stream configuration used elsewhere in this app.
    """
    config = {
        'static_image_mode': static_image_mode,
        'model_complexity': model_complexity,
        'smooth_landmarks': smooth_landmarks,
        'min_detection_confidence': min_detection_confidence,
        'min_tracking_confidence': min_tracking_confidence,
    }
    return mp.solutions.pose.Pose(**config)
|
wrong.png
ADDED