Update app.py
Browse files
app.py
CHANGED
@@ -64,27 +64,14 @@ result_queue = queue.Queue()
|
|
64 |
# Function to process video frame callback
|
65 |
# Corrected function for video frame callback
|
66 |
#####FromHERE CHANGE
|
|
|
|
|
67 |
def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
|
68 |
-
|
69 |
-
|
70 |
-
#
|
71 |
-
imgOut = segmentor.removeBG(image, imgList[indexImg])
|
72 |
-
|
73 |
-
# Detect hands
|
74 |
-
hands, processed_image = detector.findHands(imgOut, flipType=False)
|
75 |
-
|
76 |
-
# Create a blank canvas for the keyboard
|
77 |
-
keyboard_canvas = np.zeros_like(processed_image)
|
78 |
-
buttonList = create_buttons(keys)
|
79 |
-
|
80 |
-
# Draw keyboard buttons
|
81 |
-
draw_buttons(keyboard_canvas, buttonList)
|
82 |
-
|
83 |
-
# Process hands for interaction
|
84 |
-
output_text = process_hands(hands, buttonList, processed_image)
|
85 |
result_queue.put(output_text)
|
86 |
-
|
87 |
-
return av.VideoFrame.from_ndarray(processed_image, format="bgr24")
|
88 |
|
89 |
######______UPTO HERE CHANGE
|
90 |
|
|
|
64 |
# Function to process video frame callback
|
65 |
# Corrected function for video frame callback
|
66 |
#####FromHERE CHANGE
|
67 |
+
# Function to process video frame callback
|
68 |
+
# Callback invoked by streamlit-webrtc once per incoming video frame
|
69 |
def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    """Process one incoming video frame for the virtual-keyboard app.

    This is the streamlit-webrtc per-frame callback. All heavy per-frame
    work (background removal, hand detection, keyboard interaction) is
    delegated to ``process_video_frame``; the recognized output text is
    published on the module-level ``result_queue`` and the frame is
    returned for display.

    Parameters:
        frame: the raw video frame delivered by streamlit-webrtc.

    Returns:
        The frame to render. NOTE(review): the original returns the
        *incoming* frame unchanged, so any drawing done inside
        process_video_frame is not shown — confirm whether the processed
        image should be re-wrapped with av.VideoFrame.from_ndarray instead.
    """
    # Delegate detection/segmentation; indexImg selects the current
    # background image from imgList, and st.session_state carries the
    # per-session keyboard state (assumed — confirm against the helper).
    output_text = process_video_frame(
        frame, detector, segmentor, imgList, indexImg, keys, st.session_state
    )

    # Callbacks run on a worker thread; a queue is the thread-safe channel
    # back to the main Streamlit script.
    result_queue.put(output_text)

    # Return the original frame for rendering.
    return frame
|
|
|
75 |
|
76 |
######______UPTO HERE CHANGE
|
77 |
|