randomshit11 committed on
Commit 5bd3c8d · verified · 1 Parent(s): b9eec86

Update app.py

Files changed (1): app.py +107 -0
app.py CHANGED
@@ -1,3 +1,78 @@
+# import streamlit as st
+# import cv2
+# import mediapipe as mp
+# import numpy as np
+# import math
+# from tensorflow.keras.models import Model
+# from tensorflow.keras.layers import (LSTM, Dense, Dropout, Input, Flatten,
+#                                      Bidirectional, Permute, multiply)
+
+# # Load the pose estimation model from Mediapipe
+# mp_pose = mp.solutions.pose
+# mp_drawing = mp.solutions.drawing_utils
+# pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
+
+# # Define the attention block for the LSTM model
+# def attention_block(inputs, time_steps):
+#     a = Permute((2, 1))(inputs)
+#     a = Dense(time_steps, activation='softmax')(a)
+#     a_probs = Permute((2, 1), name='attention_vec')(a)
+#     output_attention_mul = multiply([inputs, a_probs], name='attention_mul')
+#     return output_attention_mul
+
+# # Build and load the LSTM model
+# @st.cache(allow_output_mutation=True)
+# def build_model(HIDDEN_UNITS=256, sequence_length=30, num_input_values=33*4, num_classes=3):
+#     inputs = Input(shape=(sequence_length, num_input_values))
+#     lstm_out = Bidirectional(LSTM(HIDDEN_UNITS, return_sequences=True))(inputs)
+#     attention_mul = attention_block(lstm_out, sequence_length)
+#     attention_mul = Flatten()(attention_mul)
+#     x = Dense(2*HIDDEN_UNITS, activation='relu')(attention_mul)
+#     x = Dropout(0.5)(x)
+#     x = Dense(num_classes, activation='softmax')(x)
+#     model = Model(inputs=[inputs], outputs=x)
+#     load_dir = "./models/LSTM_Attention.h5"
+#     model.load_weights(load_dir)
+#     return model
+
+# # Define the VideoProcessor class for real-time video processing
+# class VideoProcessor:
+#     def __init__(self):
+#         self.actions = np.array(['curl', 'press', 'squat'])
+#         self.sequence_length = 30
+#         self.colors = [(245,117,16), (117,245,16), (16,117,245)]
+#         self.pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
+#         self.model = build_model()
+
+#     def process_video(self, video_file):
+#         # Get the filename from the file object
+#         filename = video_file.name
+#         # Create a temporary file to write the contents of the uploaded video file
+#         temp_file = open(filename, 'wb')
+#         temp_file.write(video_file.read())
+#         temp_file.close()
+#         # Now we can open the video file using cv2.VideoCapture()
+#         cap = cv2.VideoCapture(filename)
+#         out_frames = []
+#         while cap.isOpened():
+#             ret, frame = cap.read()
+#             if not ret:
+#                 break
+#             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+#             results = self.pose.process(frame_rgb)
+#             frame = self.draw_landmarks(frame, results)
+#             out_frames.append(frame)
+#         cap.release()
+#         # Remove the temporary file
+#         os.remove(filename)
+#         return out_frames
+
+#     def draw_landmarks(self, image, results):
+#         mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
+#                                   mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2),
+#                                   mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2))
+#         return image
+
 import streamlit as st
 import cv2
 import mediapipe as mp
 
@@ -73,6 +148,38 @@ class VideoProcessor:
                                   mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2))
         return image
 
+    @st.cache()
+    def extract_keypoints(self, results):
+        pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33*4)
+        return pose
+
+    @st.cache()
+    def calculate_angle(self, a, b, c):
+        a = np.array(a)  # First
+        b = np.array(b)  # Mid
+        c = np.array(c)  # End
+        radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])
+        angle = np.abs(radians*180.0/np.pi)
+        if angle > 180.0:
+            angle = 360-angle
+        return angle
+
+    @st.cache()
+    def get_coordinates(self, landmarks, side, joint):
+        coord = getattr(self.mp_pose.PoseLandmark, side.upper() + "_" + joint.upper())
+        x_coord_val = landmarks[coord.value].x
+        y_coord_val = landmarks[coord.value].y
+        return [x_coord_val, y_coord_val]
+
+    @st.cache()
+    def viz_joint_angle(self, image, angle, joint):
+        cv2.putText(image, str(int(angle)),
+                    tuple(np.multiply(joint, [640, 480]).astype(int)),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA
+                    )
+        return
+
+
 # Define Streamlit app
 def main():
     st.title("Real-time Exercise Detection")