Update app.py
Browse files
app.py
CHANGED
@@ -8,11 +8,56 @@ import cv2
|
|
8 |
import numpy as np
|
9 |
import streamlit as st
|
10 |
from streamlit_webrtc import WebRtcMode, webrtc_streamer
|
|
|
11 |
from sample_utils.turn import get_ice_servers
|
|
|
|
|
|
|
|
|
12 |
|
13 |
HERE = Path(__file__).parent
|
14 |
ROOT = HERE
|
15 |
|
16 |
logger = logging.getLogger(__name__)
|
17 |
|
18 |
-
st.write("It is visible")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Virtual-keyboard Streamlit app: streams webcam frames through
# streamlit-webrtc and processes them with cvzone hand tracking and
# background segmentation; typed output is mirrored into session state.
import numpy as np
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer
from sample_utils.webrtc_helpers import process_video_frame
from sample_utils.turn import get_ice_servers
from cvzone.HandTrackingModule import HandDetector
from cvzone.SelfiSegmentationModule import SelfiSegmentation
import time
import os

HERE = Path(__file__).parent  # Path is imported earlier in this file
ROOT = HERE

logger = logging.getLogger(__name__)

# Streamlit settings.
# BUG FIX: st.set_page_config() must be the FIRST Streamlit command run
# in the script; calling st.write() before it raises
# StreamlitAPIException. The debug write is kept but moved below it.
st.set_page_config(page_title="Virtual Keyboard", layout="wide")

st.write("It is visible")

st.title("Interactive Virtual Keyboard")
st.subheader('''Turn on the webcam and use hand gestures to interact with the virtual keyboard.
Use 'a' and 'd' from keyboard to change the background.
''')

# Initialize hand detector and background segmentor.
detector = HandDetector(maxHands=1, detectionCon=0.8)
segmentor = SelfiSegmentation()
keys = [["Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P"],
        ["A", "S", "D", "F", "G", "H", "J", "K", "L", ";"],
        ["Z", "X", "C", "V", "B", "N", "M", ",", ".", "/"]]

# Background images for the segmentor.
# ROBUSTNESS: cv2.imread() returns None for unreadable or non-image
# files (e.g. .DS_Store) — filter those out so downstream code never
# receives a None frame.
listImg = os.listdir('model/street')
imgList = [
    img
    for img in (cv2.imread(f'model/street/{imgPath}') for imgPath in listImg)
    if img is not None
]
indexImg = 0

# Shared state for the text typed on the virtual keyboard.
if "output_text" not in st.session_state:
    st.session_state["output_text"] = ""


# NOTE(review): video_frame_callback runs on a worker thread; mutating
# st.session_state from that thread is not officially supported by
# streamlit-webrtc — confirm process_video_frame handles this safely.
# NOTE(review): indexImg is passed by value, so the advertised 'a'/'d'
# background switching cannot update this module-level variable —
# verify the intended mechanism inside process_video_frame.
webrtc_ctx = webrtc_streamer(
    key="virtual-keyboard",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={
        "iceServers": get_ice_servers(),
        # Force TURN relay; direct peer-to-peer candidates are not used.
        "iceTransportPolicy": "relay",
    },
    video_frame_callback=lambda frame: process_video_frame(
        frame, detector, segmentor, imgList, indexImg, keys, st.session_state
    ),
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)

# Output text display
st.subheader("Output Text")
st.text_area("Live Input:", value=st.session_state["output_text"], height=200)