Pratyush101 commited on
Commit
20e2e7b
·
verified ·
1 Parent(s): d181aba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -16
app.py CHANGED
@@ -1,21 +1,19 @@
1
  import logging
2
  import queue
3
- from pathlib import Path
4
  from typing import List, NamedTuple
5
- import mediapipe as mp
6
  import av
7
  import cv2
8
  import numpy as np
9
  import streamlit as st
10
  from streamlit_webrtc import WebRtcMode, webrtc_streamer
11
- from sample_utils.download import download_file
12
  from sample_utils.turn import get_ice_servers
13
  from cvzone.HandTrackingModule import HandDetector
14
  from cvzone.SelfiSegmentationModule import SelfiSegmentation
15
  import os
16
  import time
17
 
18
- # logger = logging.getLogger(__name__)
 
19
 
20
  # Streamlit settings
21
  st.set_page_config(page_title="Virtual Keyboard", page_icon="🏋️")
@@ -23,9 +21,8 @@ st.title("Interactive Virtual Keyboard")
23
  st.subheader('''Turn on the webcam and use hand gestures to interact with the virtual keyboard.
24
  Use 'a' and 'd' from the keyboard to change the background.''')
25
 
26
-
27
  # Initialize modules
28
- detector = HandDetector(maxHands=1, detectionCon=0.8)
29
  segmentor = SelfiSegmentation()
30
 
31
  # Define virtual keyboard layout
@@ -46,40 +43,42 @@ class Detection(NamedTuple):
46
 
47
  result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
48
 
 
49
  listImg = os.listdir('model/street') if os.path.exists('model/street') else []
50
  if not listImg:
51
  st.error("Error: 'street' directory is missing or empty. Please add background images.")
52
  st.stop()
53
  else:
54
- imgList = [cv2.imread(f'model/street/{imgPath}') for imgPath in listImg if cv2.imread(f'model/street/{imgPath}') is not None]
 
55
 
56
  indexImg = 0
57
- prev_key_time = [time.time()] * 2
58
  output_text = ""
59
 
60
  if "output_text" not in st.session_state:
61
  st.session_state["output_text"] = ""
62
 
 
63
  def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
64
  global indexImg, output_text
65
 
66
  img = frame.to_ndarray(format="bgr24")
67
- hands = detector.findHands(img, draw=False)
68
 
69
  detections = []
70
  if hands:
71
- for i, hand in enumerate(hands):
72
- lmList = hand['lmList']
73
  bbox = hand['bbox']
74
  label = "Hand"
75
  score = hand['score']
76
  box = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])
77
- detections.append(Detection(label=label, score=score, box=box))
78
- cv2.imshow('WebCam with Virtual Keyboard', img)
79
  result_queue.put(detections)
80
  st.session_state["output_text"] = output_text
81
  return av.VideoFrame.from_ndarray(img, format="bgr24")
82
 
 
83
  webrtc_streamer(
84
  key="virtual-keyboard",
85
  mode=WebRtcMode.SENDRECV,
@@ -88,6 +87,3 @@ webrtc_streamer(
88
  video_frame_callback=video_frame_callback,
89
  async_processing=True,
90
  )
91
-
92
-
93
-
 
1
  import logging
2
  import queue
 
3
  from typing import List, NamedTuple
 
4
  import av
5
  import cv2
6
  import numpy as np
7
  import streamlit as st
8
  from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
9
  from sample_utils.turn import get_ice_servers
10
  from cvzone.HandTrackingModule import HandDetector
11
  from cvzone.SelfiSegmentationModule import SelfiSegmentation
12
  import os
13
  import time
14
 
15
+ # Logger Setup
16
+ logger = logging.getLogger(__name__)
17
 
18
  # Streamlit settings
19
  st.set_page_config(page_title="Virtual Keyboard", page_icon="🏋️")
 
21
  st.subheader('''Turn on the webcam and use hand gestures to interact with the virtual keyboard.
22
  Use 'a' and 'd' from the keyboard to change the background.''')
23
 
 
24
  # Initialize modules
25
+ detector = HandDetector(maxHands=1, detectionCon=0.85)
26
  segmentor = SelfiSegmentation()
27
 
28
  # Define virtual keyboard layout
 
43
 
44
  result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
45
 
46
+ # Load background images
47
  listImg = os.listdir('model/street') if os.path.exists('model/street') else []
48
  if not listImg:
49
  st.error("Error: 'street' directory is missing or empty. Please add background images.")
50
  st.stop()
51
  else:
52
+ imgList = [cv2.imread(f'model/street/{imgPath}') for imgPath in listImg]
53
+ imgList = [img for img in imgList if img is not None]
54
 
55
  indexImg = 0
 
56
  output_text = ""
57
 
58
  if "output_text" not in st.session_state:
59
  st.session_state["output_text"] = ""
60
 
61
+ # Video Frame Callback
62
  def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
63
  global indexImg, output_text
64
 
65
  img = frame.to_ndarray(format="bgr24")
66
+ hands, img = detector.findHands(img, draw=True)
67
 
68
  detections = []
69
  if hands:
70
+ for hand in hands:
 
71
  bbox = hand['bbox']
72
  label = "Hand"
73
  score = hand['score']
74
  box = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])
75
+ detections.append(Detection(label=label, score=score, box=box))
76
+
77
  result_queue.put(detections)
78
  st.session_state["output_text"] = output_text
79
  return av.VideoFrame.from_ndarray(img, format="bgr24")
80
 
81
+ # WebRTC Streamer
82
  webrtc_streamer(
83
  key="virtual-keyboard",
84
  mode=WebRtcMode.SENDRECV,
 
87
  video_frame_callback=video_frame_callback,
88
  async_processing=True,
89
  )