Update app.py
Browse files — 2nd step of building the AI virtual keyboard
app.py
CHANGED
@@ -25,7 +25,78 @@ Use 'a' and 'd' from the keyboard to change the background.''')
|
|
25 |
|
26 |
|
27 |
|
28 |
-
# Logging setup
|
29 |
-
logger = logging.getLogger(__name__)
|
30 |
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
|
27 |
|
28 |
+
# # Logging setup
|
29 |
+
# logger = logging.getLogger(__name__)
|
30 |
|
31 |
+
|
32 |
+
# Streamlit settings: page chrome plus the header text shown above the video feed.
st.set_page_config(page_title="Virtual Keyboard", layout="wide")
st.title("Interactive Virtual Keyboard")
st.subheader('''Turn on the webcam and use hand gestures to interact with the virtual keyboard.
Use 'a' and 'd' from the keyboard to change the background.''')
|
37 |
+
|
38 |
+
# Initialize vision modules: a single-hand detector (confidence threshold 0.8)
# and a selfie-segmentation model used to swap the webcam background.
detector = HandDetector(maxHands=1, detectionCon=0.8)
segmentor = SelfiSegmentation()
|
41 |
+
|
42 |
+
# Virtual keyboard layout: three rows of ten single-character keys each,
# mirroring the main block of a QWERTY keyboard.
keys = [list(row) for row in ("QWERTYUIOP", "ASDFGHJKL;", "ZXCVBNM,./")]
|
46 |
+
|
47 |
+
class Button:
    """One key of the on-screen keyboard: top-left position, label, pixel size."""

    def __init__(self, pos, text, size=None):
        # BUG FIX: the method was named `_init_` (single underscores), so Python
        # never ran it and `Button(pos, text)` raised TypeError. Renamed to the
        # real constructor `__init__`. Also replaced the mutable default
        # `size=[100, 100]` (shared across all instances) with a None sentinel.
        self.pos = pos
        self.size = [100, 100] if size is None else size
        self.text = text
|
52 |
+
|
53 |
+
class Detection(NamedTuple):
    """A single detected hand, passed from the video thread to the UI thread."""
    label: str       # object class name, e.g. "Hand"
    score: float     # detection confidence
    box: np.ndarray  # [x1, y1, x2, y2] corner coordinates in pixels

# Thread-safe channel: the frame callback produces, the Streamlit script consumes.
result_queue: "queue.Queue[List[Detection]]" = queue.Queue()
|
59 |
+
|
60 |
+
# Load background images from 'model/street'; stop the app with a visible
# error when the directory is missing or empty.
listImg = os.listdir('model/street') if os.path.exists('model/street') else []
if not listImg:
    st.error("Error: 'street' directory is missing or empty. Please add background images.")
    st.stop()
else:
    # PERF/BUG FIX: the original comprehension called cv2.imread twice per file
    # (once for the value, once for the `is not None` filter), reading every
    # image from disk twice. Read each file once and keep only decodable ones.
    imgList = []
    for imgPath in listImg:
        img = cv2.imread(f'model/street/{imgPath}')
        if img is not None:
            imgList.append(img)
|
66 |
+
|
67 |
+
# Mutable application state shared with the frame callback.
indexImg = 0                         # index of the active background in imgList
prev_key_time = [time.time()] * 2    # last key-press timestamp (debounce state)
output_text = ""                     # text typed so far on the virtual keyboard

# Mirror the typed text into Streamlit's session state so the UI thread can
# display it across reruns.
if "output_text" not in st.session_state:
    st.session_state["output_text"] = ""
|
73 |
+
|
74 |
+
def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    """Process one WebRTC video frame.

    Composites the selected background, detects hands, publishes the
    detections to ``result_queue`` for the UI thread, mirrors the typed text
    into session state, and returns the frame to render.
    """
    global indexImg, output_text

    img = frame.to_ndarray(format="bgr24")

    # Replace the webcam background with the selected street image, if any.
    imgOut = segmentor.removeBG(img, imgList[indexImg]) if imgList else img

    # BUG FIX: `hands` and `imgOut` were referenced below but never assigned,
    # so every frame raised NameError. Run the detector on the composited
    # frame (flipType=False: the browser feed is not mirrored by cvzone).
    hands, imgOut = detector.findHands(imgOut, flipType=False)

    detections = []
    if hands:
        for hand in hands:
            bbox = hand['bbox']  # (x, y, w, h)
            # cvzone hand dicts may not carry a confidence score; the original
            # hand['score'] lookup would KeyError. Fall back to 1.0.
            score = hand.get('score', 1.0)
            box = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])
            detections.append(Detection(label="Hand", score=score, box=box))
    result_queue.put(detections)

    # NOTE(review): this runs off the main script thread; writing
    # st.session_state here is best-effort — confirm against streamlit-webrtc docs.
    st.session_state["output_text"] = output_text
    return av.VideoFrame.from_ndarray(imgOut, format="bgr24")
|
91 |
+
|
92 |
+
# Start the WebRTC stream; every incoming frame is handed to video_frame_callback.
webrtc_streamer(
    key="virtual-keyboard",
    mode=WebRtcMode.SENDRECV,
    # Force TURN relay so the stream works behind restrictive NATs/firewalls.
    rtc_configuration={"iceServers": get_ice_servers(), "iceTransportPolicy": "relay"},
    media_stream_constraints={"video": True, "audio": False},  # video only, no mic
    video_frame_callback=video_frame_callback,
    async_processing=True,  # run the callback off the main Streamlit thread
)
|
100 |
+
|
101 |
+
|
102 |
+
|