randomshit11 committed
Commit 04bfe32 · verified · 1 Parent(s): 1d15de2

Update app.py

Files changed (1):
  1. app.py +36 -71

app.py CHANGED
@@ -1,77 +1,40 @@
- # import streamlit as st
- # import cv2
- # import mediapipe as mp
- # import numpy as np
- # import math
- # from tensorflow.keras.models import Model
- # from tensorflow.keras.layers import (LSTM, Dense, Dropout, Input, Flatten,
- #                                      Bidirectional, Permute, multiply)
+ import os
+ import streamlit as st
+ import cv2
+ import mediapipe as mp
+ import numpy as np
+ import math
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.layers import (LSTM, Dense, Dropout, Input, Flatten,
+                                      Bidirectional, Permute, multiply)
 
- # # Load the pose estimation model from Mediapipe
- # mp_pose = mp.solutions.pose
- # mp_drawing = mp.solutions.drawing_utils
- # pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
+ # Load the pose estimation model from Mediapipe
+ mp_pose = mp.solutions.pose
+ mp_drawing = mp.solutions.drawing_utils
+ pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
 
- # # Define the attention block for the LSTM model
- # def attention_block(inputs, time_steps):
- #     a = Permute((2, 1))(inputs)
- #     a = Dense(time_steps, activation='softmax')(a)
- #     a_probs = Permute((2, 1), name='attention_vec')(a)
- #     output_attention_mul = multiply([inputs, a_probs], name='attention_mul')
- #     return output_attention_mul
+ # Define the attention block for the LSTM model
+ def attention_block(inputs, time_steps):
+     a = Permute((2, 1))(inputs)
+     a = Dense(time_steps, activation='softmax')(a)
+     a_probs = Permute((2, 1), name='attention_vec')(a)
+     output_attention_mul = multiply([inputs, a_probs], name='attention_mul')
+     return output_attention_mul
 
- # # Build and load the LSTM model
- # @st.cache(allow_output_mutation=True)
- # def build_model(HIDDEN_UNITS=256, sequence_length=30, num_input_values=33*4, num_classes=3):
- #     inputs = Input(shape=(sequence_length, num_input_values))
- #     lstm_out = Bidirectional(LSTM(HIDDEN_UNITS, return_sequences=True))(inputs)
- #     attention_mul = attention_block(lstm_out, sequence_length)
- #     attention_mul = Flatten()(attention_mul)
- #     x = Dense(2*HIDDEN_UNITS, activation='relu')(attention_mul)
- #     x = Dropout(0.5)(x)
- #     x = Dense(num_classes, activation='softmax')(x)
- #     model = Model(inputs=[inputs], outputs=x)
- #     load_dir = "./models/LSTM_Attention.h5"
- #     model.load_weights(load_dir)
- #     return model
-
- # # Define the VideoProcessor class for real-time video processing
- # class VideoProcessor:
- #     def __init__(self):
- #         self.actions = np.array(['curl', 'press', 'squat'])
- #         self.sequence_length = 30
- #         self.colors = [(245,117,16), (117,245,16), (16,117,245)]
- #         self.pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
- #         self.model = build_model()
-
- #     def process_video(self, video_file):
- #         # Get the filename from the file object
- #         filename = video_file.name
- #         # Create a temporary file to write the contents of the uploaded video file
- #         temp_file = open(filename, 'wb')
- #         temp_file.write(video_file.read())
- #         temp_file.close()
- #         # Now we can open the video file using cv2.VideoCapture()
- #         cap = cv2.VideoCapture(filename)
- #         out_frames = []
- #         while cap.isOpened():
- #             ret, frame = cap.read()
- #             if not ret:
- #                 break
- #             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- #             results = self.pose.process(frame_rgb)
- #             frame = self.draw_landmarks(frame, results)
- #             out_frames.append(frame)
- #         cap.release()
- #         # Remove the temporary file
- #         os.remove(filename)
- #         return out_frames
-
- #     def draw_landmarks(self, image, results):
- #         mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
- #                                   mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2),
- #                                   mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2))
- #         return image
+ # Build and load the LSTM model
+ @st.cache(allow_output_mutation=True)
+ def build_model(HIDDEN_UNITS=256, sequence_length=30, num_input_values=33*4, num_classes=3):
+     inputs = Input(shape=(sequence_length, num_input_values))
+     lstm_out = Bidirectional(LSTM(HIDDEN_UNITS, return_sequences=True))(inputs)
+     attention_mul = attention_block(lstm_out, sequence_length)
+     attention_mul = Flatten()(attention_mul)
+     x = Dense(2*HIDDEN_UNITS, activation='relu')(attention_mul)
+     x = Dropout(0.5)(x)
+     x = Dense(num_classes, activation='softmax')(x)
+     model = Model(inputs=[inputs], outputs=x)
+     load_dir = "./models/LSTM_Attention.h5"
+     model.load_weights(load_dir)
+     return model
 
  class VideoProcessor:
      def __init__(self):
@@ -240,6 +203,8 @@ class VideoProcessor:
          # Remove the temporary file
          os.remove(filename)
          return out_frames
+
+
  # Define Streamlit app
  def main():
      st.title("Real-time Exercise Detection")
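
Reviewer note: a minimal sketch (not part of the commit) of how the restored build_model could be exercised from within app.py, assuming TensorFlow 2.x and that ./models/LSTM_Attention.h5 exists. With sequence_length=30 and 33*4=132 pose values per frame, the Bidirectional LSTM emits (batch, 30, 512), attention_block reweights the 30 time steps without changing that shape, and Flatten feeds 30*512 values into the dense head, ending in 3 softmax classes.

    import numpy as np

    model = build_model()                                # loads the committed weights
    dummy = np.zeros((1, 30, 33 * 4), dtype=np.float32)  # one 30-frame pose sequence
    probs = model.predict(dummy)                         # shape (1, 3)
    print(probs.argmax(axis=-1))                         # index into ['curl', 'press', 'squat']

Note that @st.cache(allow_output_mutation=True) is Streamlit's legacy caching API; current Streamlit releases replace it with st.cache_resource for objects like models.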
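
The functional point of the commit is that the previously commented-out module-level code (imports, Mediapipe setup, attention_block, build_model) is now active, and import os is added because process_video calls os.remove(filename) during cleanup, which would otherwise raise a NameError. A condensed sketch of that upload flow, with a hypothetical helper name, standalone for clarity:

    import os
    import cv2

    def read_uploaded_video(video_file):
        filename = video_file.name        # name supplied by the Streamlit uploader
        with open(filename, 'wb') as f:   # persist the upload so OpenCV can open it
            f.write(video_file.read())
        cap = cv2.VideoCapture(filename)
        frames = []
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frames.append(frame)
        cap.release()
        os.remove(filename)               # the cleanup step that needs the os import
        return frames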