Pratyush101 committed (verified)
Commit 618adcc · 1 Parent(s): c32c42e

Update app.py

Files changed (1)
  1. app.py +26 -6
app.py CHANGED
@@ -232,7 +232,7 @@ class Detection(NamedTuple):
     score: float
     box: np.ndarray
 
-result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
+# result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
 
 listImg = os.listdir('model/street') if os.path.exists('model/street') else []
 if not listImg:
@@ -249,7 +249,6 @@ if "output_text" not in st.session_state:
     st.session_state["output_text"] = ""
 
 
-result_queue=queue.Queue()
 # def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 #     img = frame.to_ndarray(format="bgr24")
 #     hands, img = detector.findHands(img, flipType=False)
@@ -269,13 +268,34 @@ result_queue=queue.Queue()
 
 #     return av.VideoFrame.from_ndarray(img, format="bgr24")
 
+
+# Initialize result queue
+result_queue = queue.Queue()
+
 def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     img = frame.to_ndarray(format="bgr24")
+
+    # Detect hands
+    hands, img = detector.findHands(img, flipType=False)
+
+    # Collect detections
+    detections = []
+
     if hands:
-        hand = hands[0]
-        bbox = hand["bbox"]
-        cv2.rectangle(img, (bbox[0], bbox[1]), (255, 0, 0), 2)
-        result_queue.put(hands)
+        for hand in hands:
+            bbox = hand["bbox"]
+            label = hand["type"]
+            score = hand["score"]
+
+            # Draw bounding box
+            cv2.rectangle(img, (bbox[0], bbox[1]),
+                          (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2)
+
+            # Append detection details
+            detections.append({"label": label, "score": score, "bbox": bbox})
+
+        # Put detections into result queue
+        result_queue.put(detections)
 
     return av.VideoFrame.from_ndarray(img, format="bgr24")
 
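
For context: the new callback only pushes detections into result_queue; nothing in this hunk reads them back on the Streamlit side. Below is a minimal sketch of how such a callback is typically wired into streamlit-webrtc and how the main script can drain the queue. The webrtc_streamer key, the HandDetector settings, and the polling snippet are assumptions for illustration, not part of this commit; the commit also records hand["score"], which the sketch omits since it may depend on the local HandDetector implementation.

# Minimal sketch; assumes streamlit-webrtc and cvzone are installed.
import queue

import av
import cv2
import streamlit as st
from cvzone.HandTrackingModule import HandDetector
from streamlit_webrtc import webrtc_streamer

detector = HandDetector(detectionCon=0.8, maxHands=2)  # assumed settings
result_queue: queue.Queue = queue.Queue()

def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")
    hands, img = detector.findHands(img, flipType=False)

    detections = []
    if hands:
        for hand in hands:
            x, y, w, h = hand["bbox"]
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            detections.append({"label": hand["type"], "bbox": (x, y, w, h)})
        result_queue.put(detections)

    return av.VideoFrame.from_ndarray(img, format="bgr24")

ctx = webrtc_streamer(key="hand-detection", video_frame_callback=video_frame_callback)

# The callback runs in a worker thread, so the main script reads results back
# through the thread-safe queue instead of calling st.* from inside the callback.
if ctx.state.playing:
    try:
        st.write(result_queue.get(timeout=1.0))
    except queue.Empty:
        st.write("No detections yet.")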