Update app.py

app.py
@@ -76,20 +76,6 @@ class Button:
     # return av.VideoFrame.from_ndarray(img, format="bgr24")
 
 
-def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
-    img = frame.to_ndarray(format="bgr24")
-    hands, img = detector.findHands(img, flipType=False)
-
-    # Render hand detection results
-    if hands:
-        hand = hands[0]
-        bbox = hand["bbox"]
-        cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2)
-
-    return av.VideoFrame.from_ndarray(img, format="bgr24")
-
-
-
 
 # Initialize components
 detector = HandDetector(maxHands=1, detectionCon=0.8)
@@ -102,6 +88,18 @@ listImg = os.listdir('model/street')
 imgList = [cv2.imread(f'model/street/{imgPath}') for imgPath in listImg]
 indexImg = 0
 
+def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
+    img = frame.to_ndarray(format="bgr24")
+    hands, img = detector.findHands(img, flipType=False)
+
+    # Render hand detection results
+    if hands:
+        hand = hands[0]
+        bbox = hand["bbox"]
+        cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2)
+
+    return av.VideoFrame.from_ndarray(img, format="bgr24")
+
 # Shared state for output text
 if "output_text" not in st.session_state:
     st.session_state["output_text"] = ""
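This hunk moves `video_frame_callback` below the detector and image-list setup, so definition order matches dependency order: streamlit-webrtc invokes the callback from a worker thread, and with this layout every global the callback reads (`detector` here) is bound before the streamer that calls it is even created. A minimal, self-contained sketch of the resulting pattern follows; the imports and the `key` argument are assumptions, since the hunks only show the moved function:

```python
import av
import cv2
from cvzone.HandTrackingModule import HandDetector
from streamlit_webrtc import WebRtcMode, webrtc_streamer

# Shared objects are initialized first, so the WebRTC worker thread
# never sees an unbound name once frames start arriving.
detector = HandDetector(maxHands=1, detectionCon=0.8)

def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")
    hands, img = detector.findHands(img, flipType=False)
    if hands:
        # cvzone's bbox is (x, y, w, h)
        x, y, w, h = hands[0]["bbox"]
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return av.VideoFrame.from_ndarray(img, format="bgr24")

webrtc_streamer(
    key="hand-tracking",  # assumed key; not shown in the hunks
    mode=WebRtcMode.SENDRECV,
    video_frame_callback=video_frame_callback,
    media_stream_constraints={"video": True, "audio": False},
)
```

Strictly speaking, Python resolves a function's globals at call time, so the old order also worked as long as `detector` was assigned before the first frame; the reorder is mainly about grouping the callback with the objects it uses.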
@@ -121,11 +119,9 @@ webrtc_ctx = webrtc_streamer(
     mode=WebRtcMode.SENDRECV,
     rtc_configuration={
         "iceServers": get_ice_servers(),
-        "iceTransportPolicy": "relay",
     },
     video_frame_callback=video_frame_callback,
     media_stream_constraints={"video": True, "audio": False},
-    async_processing=True,
 )
 
 st.markdown("### Instructions")
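Two arguments are dropped here. `"iceTransportPolicy": "relay"` is a standard WebRTC RTCConfiguration field that forces all media through TURN relays; removing it restores the default `"all"` policy, letting ICE try direct and STUN-derived candidates before falling back to a relay. `async_processing=True` matches streamlit-webrtc's documented default, so dropping it should be behavior-neutral cleanup. A small sketch of the two configurations, using the app's own `get_ice_servers()` helper (the variable names are illustrative):

```python
# Default ICE policy ("all"): host, STUN, and TURN candidates are all tried,
# so peers on the same network can connect directly with lower latency.
rtc_configuration = {"iceServers": get_ice_servers()}

# What the removed line enforced: media is routed through TURN relays only.
# More firewall-proof, but it adds latency and consumes relay bandwidth.
relay_only_configuration = {
    "iceServers": get_ice_servers(),
    "iceTransportPolicy": "relay",
}
```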