whitphx (HF staff) committed
Commit b808e34
Parent: 582c1cd

Update streamlit-webrtc to 0.6.0

Files changed (2)
  1. app.py +32 -3
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,8 +1,11 @@
 import logging
 import logging.handlers
 import queue
+import threading
+import time
 import urllib.request
 from pathlib import Path
+from typing import List, Union
 
 try:
     from typing import Literal
@@ -227,14 +230,23 @@ def app_object_detection():
 
     DEFAULT_CONFIDENCE_THRESHOLD = 0.5
 
-    class NNVideoTransformer(VideoTransformerBase):
+    class MobileNetSSDVideoTransformer(VideoTransformerBase):
         confidence_threshold: float
+        _labels: Union[List[str], None]
+        _labels_lock: threading.Lock
 
         def __init__(self) -> None:
             self._net = cv2.dnn.readNetFromCaffe(
                 str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)
             )
             self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD
+            self._labels = None
+            self._labels_lock = threading.Lock()
+
+        @property
+        def labels(self) -> Union[List[str], None]:
+            with self._labels_lock:
+                return self._labels
 
         def _annotate_image(self, image, detections):
             # loop over the detections
@@ -275,7 +287,11 @@ def app_object_detection():
             self._net.setInput(blob)
             detections = self._net.forward()
             annotated_image, labels = self._annotate_image(image, detections)
-            # TODO: Show labels
+
+            # NOTE: This `transform` method is called in another thread,
+            # so it must be thread-safe.
+            with self._labels_lock:
+                self._labels = labels
 
             return annotated_image
 
@@ -283,7 +299,7 @@ def app_object_detection():
         key="object-detection",
         mode=WebRtcMode.SENDRECV,
         client_settings=WEBRTC_CLIENT_SETTINGS,
-        video_transformer_factory=NNVideoTransformer,
+        video_transformer_factory=MobileNetSSDVideoTransformer,
         async_transform=True,
     )
 
@@ -293,6 +309,19 @@ def app_object_detection():
     if webrtc_ctx.video_transformer:
         webrtc_ctx.video_transformer.confidence_threshold = confidence_threshold
 
+    if st.checkbox("Show the detected labels"):
+        if webrtc_ctx.state.playing:
+            labels_placeholder = st.empty()
+            # NOTE: The video transformation with object detection and
+            # this loop displaying the result labels are running
+            # in different threads asynchronously.
+            # Then the rendered video frames and the labels displayed here
+            # are not synchronized.
+            while True:
+                if webrtc_ctx.video_transformer:
+                    labels_placeholder.write(webrtc_ctx.video_transformer.labels)
+                time.sleep(0.1)
+
     st.markdown(
         "This demo uses a model and code from "
         "https://github.com/robmarkcole/object-detection-app. "
requirements.txt CHANGED
@@ -4,5 +4,5 @@ av==8.0.2
 numpy==1.19.5
 opencv_python==4.5.1.48
 streamlit==0.74.1
-streamlit_webrtc==0.5.0
+streamlit_webrtc==0.6.0
 typing_extensions==3.7.4.3