Pratyush101 committed on
Commit
ac79aff
·
verified ·
1 Parent(s): a03b10d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -43
app.py CHANGED
@@ -26,55 +26,71 @@ class Button:
26
  self.text = text
27
 
28
  # Function to process the video frame from the webcam
29
def process_video_frame(frame: av.VideoFrame, detector, segmentor, imgList, indexImg, keys, session_state)-> av.VideoFrame:
    """Process one webcam frame: strip the background, detect a hand, and drive a virtual keyboard.

    Args:
        frame: Incoming video frame (converted to a BGR ndarray for OpenCV).
        detector: cvzone-style hand detector exposing ``findHands``.
        segmentor: Background remover exposing ``removeBG``.
        imgList: Candidate background images; ``imgList[indexImg]`` is used.
        indexImg: Index of the background image to substitute in.
        keys: Three rows of key labels for the on-screen keyboard.
        session_state: Mutable mapping; ``session_state["output_text"]`` accumulates typed keys.

    Returns:
        The annotated frame as an ``av.VideoFrame`` (BGR).
    """
    # Convert the frame to a numpy array (BGR format)
    image = frame.to_ndarray(format="bgr24")

    # Remove background using SelfiSegmentation
    imgOut = segmentor.removeBG(image, imgList[indexImg])

    # Detect hands on the background-removed image
    hands, img = detector.findHands(imgOut, flipType=False)

    # Create a blank canvas for the keyboard
    keyboard_canvas = np.zeros_like(img)
    buttonList = []

    # Build the three keyboard rows. enumerate() replaces list.index(key):
    # index() returns the FIRST match, so duplicate labels in a row would all
    # stack at one position (and it is O(n) per key).
    row_y = (30, 150, 260)
    for row, y in zip(keys, row_y):
        for col, key in enumerate(row):
            buttonList.append(Button([30 + col * 105, y], key))

    # Draw the buttons on the keyboard canvas
    for button in buttonList:
        x, y = button.pos
        cv2.rectangle(keyboard_canvas, (x, y), (x + button.size[0], y + button.size[1]), (255, 255, 255), -1)
        cv2.putText(keyboard_canvas, button.text, (x + 20, y + 70), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 0), 3)
    # NOTE(review): keyboard_canvas is drawn but never composited onto `img`,
    # so the white key grid is invisible in the returned frame — confirm intent.

    # Handle input and gestures from detected hands
    if hands:
        for hand in hands:
            lmList = hand["lmList"]
            if lmList:
                # Coordinates of the index finger tip (landmark 8)
                x8, y8 = lmList[8][0], lmList[8][1]
                for button in buttonList:
                    bx, by = button.pos
                    bw, bh = button.size
                    # Check if the index finger is over a button
                    if bx < x8 < bx + bw and by < y8 < by + bh:
                        # Highlight the pressed button on the output frame
                        cv2.rectangle(img, (bx, by), (bx + bw, by + bh), (0, 255, 0), -1)
                        cv2.putText(img, button.text, (bx + 20, by + 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 3)
                        # NOTE(review): this appends on EVERY frame the finger
                        # hovers the key — characters repeat without a
                        # debounce/click gesture; confirm whether intended.
                        session_state["output_text"] += button.text

    # Wrap the annotated ndarray back into a video frame
    return av.VideoFrame.from_ndarray(img, format="bgr24")
77
 
 
 
 
78
  # Initialize components
79
  detector = HandDetector(maxHands=1, detectionCon=0.8)
80
  segmentor = SelfiSegmentation()
 
26
  self.text = text
27
 
28
  # Function to process the video frame from the webcam
29
+ # def process_video_frame(frame: av.VideoFrame, detector, segmentor, imgList, indexImg, keys, session_state)-> av.VideoFrame:
30
+ # # Convert the frame to a numpy array (BGR format)
31
+ # image = frame.to_ndarray(format="bgr24")
32
 
33
+ # # Remove background using SelfiSegmentation
34
+ # imgOut = segmentor.removeBG(image, imgList[indexImg])
35
 
36
+ # # Detect hands on the background-removed image
37
+ # hands, img = detector.findHands(imgOut, flipType=False)
38
 
39
+ # # Create a blank canvas for the keyboard
40
+ # keyboard_canvas = np.zeros_like(img)
41
+ # buttonList = []
42
+
43
+ # # Create buttons for the virtual keyboard based on the keys list
44
+ # for key in keys[0]:
45
+ # buttonList.append(Button([30 + keys[0].index(key) * 105, 30], key))
46
+ # for key in keys[1]:
47
+ # buttonList.append(Button([30 + keys[1].index(key) * 105, 150], key))
48
+ # for key in keys[2]:
49
+ # buttonList.append(Button([30 + keys[2].index(key) * 105, 260], key))
50
+
51
+ # # Draw the buttons on the keyboard canvas
52
+ # for button in buttonList:
53
+ # x, y = button.pos
54
+ # cv2.rectangle(keyboard_canvas, (x, y), (x + button.size[0], y + button.size[1]), (255, 255, 255), -1)
55
+ # cv2.putText(keyboard_canvas, button.text, (x + 20, y + 70), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 0), 3)
56
+
57
+ # # Handle input and gestures from detected hands
58
+ # if hands:
59
+ # for hand in hands:
60
+ # lmList = hand["lmList"]
61
+ # if lmList:
62
+ # # Get the coordinates of the index finger tip (landmark 8)
63
+ # x8, y8 = lmList[8][0], lmList[8][1]
64
+ # for button in buttonList:
65
+ # bx, by = button.pos
66
+ # bw, bh = button.size
67
+ # # Check if the index finger is over a button
68
+ # if bx < x8 < bx + bw and by < y8 < by + bh:
69
+ # # Highlight the button and update the text
70
+ # cv2.rectangle(img, (bx, by), (bx + bw, by + bh), (0, 255, 0), -1)
71
+ # cv2.putText(img, button.text, (bx + 20, by + 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 3)
72
+ # # Update the output text in session_state
73
+ # session_state["output_text"] += button.text
74
+
75
+ # # Corrected return: Create a video frame from the ndarray image
76
+ # return av.VideoFrame.from_ndarray(img, format="bgr24")
77
+
78
+
79
def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    """Annotate an incoming webcam frame with a box around the first detected hand.

    The frame is decoded to a BGR ndarray, passed through the module-level
    hand detector, and re-encoded as an ``av.VideoFrame``.
    """
    img = frame.to_ndarray(format="bgr24")
    hands, img = detector.findHands(img, flipType=False)

    # Draw the bounding box of the first detected hand, if any.
    if hands:
        x, y, w, h = hands[0]["bbox"]
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

    return av.VideoFrame.from_ndarray(img, format="bgr24")
90
 
91
+
92
+
93
+
94
# Initialize components
# Hand detector: track at most one hand; detections below 0.8 confidence are discarded.
detector = HandDetector(maxHands=1, detectionCon=0.8)
# Selfie-segmentation model used to remove the background from webcam frames.
segmentor = SelfiSegmentation()