Pratyush101 committed
Commit bea8c63 · verified · 1 Parent(s): 6f10329

Update app.py

Files changed (1)
  1. app.py +31 -222
app.py CHANGED
@@ -1,194 +1,3 @@
- # import logging
- # import queue
- # from pathlib import Path
- # from typing import List, NamedTuple
- # import mediapipe as mp
- # import av
- # import cv2
- # import numpy as np
- # import streamlit as st
- # from streamlit_webrtc import WebRtcMode, webrtc_streamer
- # from sample_utils.turn import get_ice_servers
- # from cvzone.HandTrackingModule import HandDetector
- # from cvzone.SelfiSegmentationModule import SelfiSegmentation
- # import time
- # import os
-
- # logger = logging.getLogger(__name__)
-
- # st.title("Interactive Virtual Keyboard with Twilio Integration")
- # st.info("Use your webcam to interact with the virtual keyboard via hand gestures.")
-
- # class Button:
- #     def __init__(self, pos, text, size=[100, 100]):
- #         self.pos = pos
- #         self.size = size
- #         self.text = text
-
- # # Initialize components
- # detector = HandDetector(maxHands=1, detectionCon=0.8)
- # # segmentor = SelfiSegmentation()
- # # keys = [["Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P"],
- # #         ["A", "S", "D", "F", "G", "H", "J", "K", "L", ";"],
- # #         ["Z", "X", "C", "V", "B", "N", "M", ",", ".", "/"]]
-
- # # listImg = os.listdir('model/street')
- # # imgList = [cv2.imread(f'model/street/{imgPath}') for imgPath in listImg]
- # # indexImg = 0
-
-
- # # # Function to process the video frame from the webcam
- # # def process_video_frame(frame, detector, segmentor, imgList, indexImg, keys, session_state):
- # #     # Convert the frame to a numpy array (BGR format)
- # #     image = frame.to_ndarray(format="bgr24")
-
- # #     # Remove background using SelfiSegmentation
- # #     imgOut = segmentor.removeBG(image, imgList[indexImg])
-
- # #     # Detect hands on the background-removed image
- # #     hands, img = detector.findHands(imgOut, flipType=False)
-
- # #     # Create a blank canvas for the keyboard
- # #     keyboard_canvas = np.zeros_like(img)
- # #     buttonList = []
-
- # #     # Create buttons for the virtual keyboard based on the keys list
- # #     for key in keys[0]:
- # #         buttonList.append(Button([30 + keys[0].index(key) * 105, 30], key))
- # #     for key in keys[1]:
- # #         buttonList.append(Button([30 + keys[1].index(key) * 105, 150], key))
- # #     for key in keys[2]:
- # #         buttonList.append(Button([30 + keys[2].index(key) * 105, 260], key))
-
- # #     # Draw the buttons on the keyboard canvas
- # #     for button in buttonList:
- # #         x, y = button.pos
- # #         cv2.rectangle(keyboard_canvas, (x, y), (x + button.size[0], y + button.size[1]), (255, 255, 255), -1)
- # #         cv2.putText(keyboard_canvas, button.text, (x + 20, y + 70), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 0), 3)
-
- # #     # Handle input and gestures from detected hands
- # #     if hands:
- # #         for hand in hands:
- # #             lmList = hand["lmList"]
- # #             if lmList:
- # #                 # Get the coordinates of the index finger tip (landmark 8)
- # #                 x8, y8 = lmList[8][0], lmList[8][1]
- # #                 for button in buttonList:
- # #                     bx, by = button.pos
- # #                     bw, bh = button.size
- # #                     # Check if the index finger is over a button
- # #                     if bx < x8 < bx + bw and by < y8 < by + bh:
- # #                         # Highlight the button and update the text
- # #                         cv2.rectangle(img, (bx, by), (bx + bw, by + bh), (0, 255, 0), -1)
- # #                         cv2.putText(img, button.text, (bx + 20, by + 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 3)
- # #                         # Update the output text in session_state
- # #                         session_state["output_text"] += button.text
-
- # #     # Corrected return: Create a video frame from the ndarray image
- # #     return av.VideoFrame.from_ndarray(img, format="bgr24")
-
-
-
-
-
-
- # # Shared state for output text
- # if "output_text" not in st.session_state:
- #     st.session_state["output_text"] = ""
-
- # class Detection(NamedTuple):
- #     label: str
- #     score: float
- #     box: np.ndarray
-
-
- # @st.cache_resource # Cache label colors
- # def generate_label_colors():
- #     return np.random.uniform(0, 255, size=(2, 3)) # Two classes: Left and Right Hand
-
-
- # COLORS = generate_label_colors()
-
- # # Initialize MediaPipe Hands
- # mp_hands = mp.solutions.hands
- # detector = mp_hands.Hands(static_image_mode=False, max_num_hands=2, min_detection_confidence=0.5)
-
- # # Session-specific caching
- # result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
-
- # # Hand detection callback
- # def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
- #     image = frame.to_ndarray(format="bgr24")
- #     h, w = image.shape[:2]
-
- #     # Process image with MediaPipe Hands
- #     results = detector.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
-
- #     detections = []
- #     if results.multi_hand_landmarks:
- #         for hand_landmarks, hand_class in zip(results.multi_hand_landmarks, results.multi_handedness):
- #             # Extract bounding box
- #             x_min, y_min = 1, 1
- #             x_max, y_max = 0, 0
- #             for lm in hand_landmarks.landmark:
- #                 x_min = min(x_min, lm.x)
- #                 y_min = min(y_min, lm.y)
- #                 x_max = max(x_max, lm.x)
- #                 y_max = max(y_max, lm.y)
-
- #             # Scale bbox to image size
- #             box = np.array([x_min * w, y_min * h, x_max * w, y_max * h]).astype("int")
-
- #             # Label and score
- #             label = hand_class.classification[0].label
- #             score = hand_class.classification[0].score
-
- #             detections.append(Detection(label=label, score=score, box=box))
-
- #             # Draw bounding box and label
- #             color = COLORS[0 if label == "Left" else 1]
- #             cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), color, 2)
- #             caption = f"{label}: {round(score * 100, 2)}%"
- #             cv2.putText(
- #                 image,
- #                 caption,
- #                 (box[0], box[1] - 15 if box[1] - 15 > 15 else box[1] + 15),
- #                 cv2.FONT_HERSHEY_SIMPLEX,
- #                 0.5,
- #                 color,
- #                 2,
- #             )
-
- #     # Put results in the queue
- #     result_queue.put(detections)
-
- #     return av.VideoFrame.from_ndarray(image, format="bgr24")
-
-
-
- # webrtc_ctx = webrtc_streamer(
- #     key="keyboard-demo",
- #     mode=WebRtcMode.SENDRECV,
- #     rtc_configuration={
- #         "iceServers": get_ice_servers(),
- #         "iceTransportPolicy": "relay",
- #     },
- #     video_frame_callback=video_frame_callback,
- #     media_stream_constraints={"video": True, "audio": False},
- #     async_processing=True,
- # )
-
-
- # st.markdown("### Instructions")
- # st.write(
- #     """
- #     1. Turn on your webcam using the checkbox above.
- #     2. Use hand gestures to interact with the virtual keyboard.
- #     """
- # )
-
- #)
-
  import logging
  import queue
  from pathlib import Path
@@ -295,45 +104,45 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 
 
- # # 1. Bend Forward Warning
- # if 10 < angleHipL < 18:
- #     cv2.rectangle(image, (310, 180), (450, 220), (0, 0, 0), -1)
- #     cv2.putText(image,f"Bend Forward",(320,200),cv2.FONT_HERSHEY_SIMPLEX,1,(150,120,255),1,cv2.LINE_AA)
+ # 1. Bend Forward Warning
+ if 10 < angleHipL < 18:
+     cv2.rectangle(image, (310, 180), (450, 220), (0, 0, 0), -1)
+     cv2.putText(image, "Bend Forward", (320, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (150, 120, 255), 1, cv2.LINE_AA)
 
- # # 2. Lean Backward Warning
- # if angleHipL > 45:
- #     cv2.rectangle(image, (310, 180), (450, 220), (0, 0, 0), -1)
- #     cv2.putText(image,f"Bend Backward",(320,200),cv2.FONT_HERSHEY_SIMPLEX,1,(80,120,255),1,cv2.LINE_AA)
+ # 2. Lean Backward Warning
+ if angleHipL > 45:
+     cv2.rectangle(image, (310, 180), (450, 220), (0, 0, 0), -1)
+     cv2.putText(image, "Bend Backward", (320, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 120, 255), 1, cv2.LINE_AA)
 
  # # stage 2
 
  # # Incorrect movements
 
- # # 3. Knees not low enough
- # if 110 < angleKneeL < 130:
- #     cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
- #     cv2.putText(image,f"Lower Your Hips",(230,60),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)
+ # 3. Knees not low enough
+ if 110 < angleKneeL < 130:
+     cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
+     cv2.putText(image, "Lower Your Hips", (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
 
 
- # # 3. Knees not low enough and not completed the squat
- # if angleKneeL>130 and stage=='mid':
- #     cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
- #     cv2.putText(image,f"Lower Your Hips",(230,60),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)
- #     incorrect+=1
- #     stage='up'
-
- # # 4. Squat too deep
- # if angleKneeL < 80 and stage=='mid':
- #     cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
- #     cv2.putText(image,f"Squat too deep",(230,60),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)
- #     incorrect +=1
- #     stage='up'
-
- # # stage 4
- # if (80 < angleKneeL < 110) and stage=='mid':
- #     if (18 < angleHipL < 40): # Valid "down" position
- #         correct+=1
- #         stage='up'
+ # 4. Knees not low enough and the squat was not completed
+ if angleKneeL > 130 and stage == 'mid':
+     cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
+     cv2.putText(image, "Lower Your Hips", (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+     incorrect += 1
+     stage = 'up'
+
+ # 5. Squat too deep
+ if angleKneeL < 80 and stage == 'mid':
+     cv2.rectangle(image, (220, 40), (450, 80), (0, 0, 0), -1)
+     cv2.putText(image, "Squat too deep", (230, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+     incorrect += 1
+     stage = 'up'
+
+ # stage 4
+ if (80 < angleKneeL < 110) and stage == 'mid':
+     if 18 < angleHipL < 40:  # Valid "down" position
+         correct += 1
+         stage = 'up'
 
 
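
Note on the newly enabled block: it assumes that `angleKneeL` and `angleHipL` (left knee and hip angles, in degrees) and the rep-tracking variables `stage`, `correct`, and `incorrect` are set earlier in `video_frame_callback`. A minimal sketch of how such joint angles are commonly derived from MediaPipe Pose landmarks follows; the helper name `joint_angle` and the exact landmark wiring are illustrative assumptions, not part of this commit.

import numpy as np
import mediapipe as mp

mp_pose = mp.solutions.pose

def joint_angle(a, b, c):
    # Angle at vertex b, in degrees, formed by the segments b->a and b->c,
    # computed from 2D (normalized) landmark coordinates.
    a, b, c = np.array(a), np.array(b), np.array(c)
    rad = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    deg = abs(np.degrees(rad))
    return 360.0 - deg if deg > 180.0 else deg

# Hypothetical wiring, given lm = results.pose_landmarks.landmark:
# shoulderL = (lm[mp_pose.PoseLandmark.LEFT_SHOULDER].x, lm[mp_pose.PoseLandmark.LEFT_SHOULDER].y)
# hipL = (lm[mp_pose.PoseLandmark.LEFT_HIP].x, lm[mp_pose.PoseLandmark.LEFT_HIP].y)
# kneeL = (lm[mp_pose.PoseLandmark.LEFT_KNEE].x, lm[mp_pose.PoseLandmark.LEFT_KNEE].y)
# ankleL = (lm[mp_pose.PoseLandmark.LEFT_ANKLE].x, lm[mp_pose.PoseLandmark.LEFT_ANKLE].y)
# angleKneeL = joint_angle(hipL, kneeL, ankleL)
# The small angleHipL thresholds above (10 to 45 degrees) suggest a deviation
# measure rather than the raw shoulder-hip-knee angle, e.g.:
# angleHipL = 180.0 - joint_angle(shoulderL, hipL, kneeL)

The `stage` flag together with the `correct`/`incorrect` counters reads like a small rep state machine: a squat presumably passes from 'up' through 'mid' and is scored exactly once on the way back, which is why every scoring branch resets `stage = 'up'`.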