Update app.py
app.py CHANGED
@@ -62,12 +62,31 @@ if "output_text" not in st.session_state:
 result_queue = queue.Queue()

 # Function to process video frame callback
-
-
-
-
+# Corrected function for video frame callback
+#####FromHERE CHANGE
+def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
+    image = frame.to_ndarray(format="bgr24")
+
+    # Preprocess the frame
+    imgOut = segmentor.removeBG(image, imgList[indexImg])
+
+    # Detect hands
+    hands, processed_image = detector.findHands(imgOut, flipType=False)
+
+    # Create a blank canvas for the keyboard
+    keyboard_canvas = np.zeros_like(processed_image)
+    buttonList = create_buttons(keys)
+
+    # Draw keyboard buttons
+    draw_buttons(keyboard_canvas, buttonList)
+
+    # Process hands for interaction
+    output_text = process_hands(hands, buttonList, processed_image)
     result_queue.put(output_text)
-
+
+    return av.VideoFrame.from_ndarray(processed_image, format="bgr24")
+
+######______UPTO HERE CHANGE

 webrtc_ctx = webrtc_streamer(
     key="virtual-keyboard",
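The webrtc_streamer(...) call is cut off at the end of this hunk, so for orientation here is a minimal sketch of how a callback like the one added above is typically wired into streamlit-webrtc. The mode, media_stream_constraints, async_processing options and the result-display loop are assumptions for illustration, not the arguments this app actually passes; video_frame_callback, result_queue, and st.session_state["output_text"] refer to the objects that appear in the diff, and the helpers it calls (create_buttons, draw_buttons, process_hands, segmentor, detector) are assumed to be defined elsewhere in app.py.

import queue
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

# Sketch only: stream camera frames through the callback added in this commit.
webrtc_ctx = webrtc_streamer(
    key="virtual-keyboard",
    mode=WebRtcMode.SENDRECV,                   # assumption: send camera frames, show processed frames
    video_frame_callback=video_frame_callback,  # function added in this commit
    media_stream_constraints={"video": True, "audio": False},  # assumption: video only
    async_processing=True,
)

# Sketch only: drain the queue the callback fills and display the typed text.
if webrtc_ctx.state.playing:
    try:
        st.session_state["output_text"] = result_queue.get(timeout=1.0)
    except queue.Empty:
        pass
    st.text(st.session_state["output_text"])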