Pratyush101 committed on
Commit
4600684
·
verified ·
1 Parent(s): 4b07573

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -8
app.py CHANGED
@@ -88,18 +88,80 @@ listImg = os.listdir('model/street')
88
# Load every street background image once at startup so frames can be
# composited without re-reading files per frame.
imgList = [cv2.imread(f'model/street/{name}') for name in listImg]
# Index of the background image currently in use.
indexImg = 0
90
 
91
def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    """Detect hands in one webcam frame and outline the first detection.

    Converts the incoming frame to a BGR ndarray, runs the module-level
    ``detector`` over it, draws a blue rectangle around the first detected
    hand's bounding box, and returns the annotated frame.
    """
    img = frame.to_ndarray(format="bgr24")
    hands, img = detector.findHands(img, flipType=False)

    # Render hand detection results
    if hands:
        # bbox is (x, y, width, height) — unpack for readability.
        x, y, w, h = hands[0]["bbox"]
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

    return av.VideoFrame.from_ndarray(img, format="bgr24")
102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
# Shared state for output text: setdefault creates the key only on the
# first run, leaving any existing value untouched on reruns.
st.session_state.setdefault("output_text", "")
 
88
# Read all street background images up front; each frame callback then
# just indexes into the preloaded list.
imgList = [cv2.imread(f'model/street/{name}') for name in listImg]
# Which preloaded background is currently selected.
indexImg = 0
90
 
 
 
 
91
 
92
# Function to process the video frame from the webcam.
def process_video_frame(frame: av.VideoFrame, detector, segmentor, imgList, indexImg, keys, session_state) -> av.VideoFrame:
    """Process one webcam frame for the virtual-keyboard app.

    Replaces the camera background with the selected image, detects hands,
    draws the three-row virtual keyboard, and appends the character under
    the index fingertip to ``session_state["output_text"]``.

    Parameters:
        frame: incoming video frame (converted to a BGR ndarray).
        detector: hand detector exposing ``findHands(img, flipType=...)``.
        segmentor: background remover exposing ``removeBG(img, bg)``.
        imgList: preloaded background images; ``indexImg`` selects one.
        indexImg: index of the active background in ``imgList``.
        keys: three sequences of key labels, one per keyboard row.
        session_state: mutable mapping holding the shared ``output_text``.

    Returns:
        The annotated frame as an ``av.VideoFrame``.
    """
    # Convert the frame to a numpy array (BGR format).
    image = frame.to_ndarray(format="bgr24")

    # Remove the background using the selected street image.
    imgOut = segmentor.removeBG(image, imgList[indexImg])

    # Detect hands on the background-removed image.
    hands, img = detector.findHands(imgOut, flipType=False)

    # Blank canvas (same shape as the frame) that the keyboard is drawn on.
    keyboard_canvas = np.zeros_like(img)

    # Lay out the three keyboard rows. enumerate() replaces the original
    # per-key list.index() lookups, which were O(n^2) and would misplace
    # keys if a row ever contained a duplicate label.
    row_y = (30, 150, 260)
    buttonList = [
        Button([30 + col * 105, row_y[row]], key)
        for row, row_keys in enumerate(keys[:3])
        for col, key in enumerate(row_keys)
    ]

    # Draw each key on the keyboard canvas.
    for button in buttonList:
        x, y = button.pos
        cv2.rectangle(keyboard_canvas, (x, y), (x + button.size[0], y + button.size[1]), (255, 255, 255), -1)
        cv2.putText(keyboard_canvas, button.text, (x + 20, y + 70), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 0), 3)

    # Handle input: a key counts as pressed when the index fingertip
    # (landmark 8) lies inside its bounding box.
    if hands:
        for hand in hands:
            lmList = hand["lmList"]
            if lmList:
                x8, y8 = lmList[8][0], lmList[8][1]
                for button in buttonList:
                    bx, by = button.pos
                    bw, bh = button.size
                    if bx < x8 < bx + bw and by < y8 < by + bh:
                        # Highlight the hovered key on the output frame.
                        cv2.rectangle(img, (bx, by), (bx + bw, by + bh), (0, 255, 0), -1)
                        cv2.putText(img, button.text, (bx + 20, by + 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 3)
                        # NOTE(review): this fires on every frame while the
                        # finger hovers, so a held finger appends the same
                        # character repeatedly — needs debouncing or a click
                        # gesture (e.g. index/middle fingertip distance).
                        session_state["output_text"] += button.text

    # Return the annotated frame to the video pipeline.
    return av.VideoFrame.from_ndarray(img, format="bgr24")
141
 
142
+
143
+
144
+
145
+
146
+
147
+
148
+
149
+
150
+
151
+
152
+
153
+ # def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
154
+ # img = frame.to_ndarray(format="bgr24")
155
+ # hands, img = detector.findHands(img, flipType=False)
156
+
157
+ # # Render hand detection results
158
+ # if hands:
159
+ # hand = hands[0]
160
+ # bbox = hand["bbox"]
161
+ # cv2.rectangle(img, (bbox[0], bbox[1]), (255, 0, 0), 2)
162
+
163
+ # return av.VideoFrame.from_ndarray(img, format="bgr24")
164
+
165
# Shared state for output text: initialize once; reruns keep the value.
st.session_state.setdefault("output_text", "")