Spaces:
Sleeping
Sleeping
File size: 2,335 Bytes
2f92278 606d19f 8ee981b 606d19f 7e16940 8ee981b 606d19f 8ee981b 606d19f a675789 7e16940 606d19f ef7efe9 606d19f ef7efe9 606d19f ef7efe9 606d19f ef7efe9 606d19f 8ee981b 606d19f ef7efe9 606d19f ef7efe9 8ee981b 606d19f 8ee981b 7e16940 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
import gradio as gr
import cv2
from gradio_webrtc import WebRTC
import mediapipe as mp
# Initialize MediaPipe Hands (detector + drawing helpers).
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
# Confidence thresholds are lowered from the 0.5 default to trade accuracy for speed.
hands = mp_hands.Hands(min_detection_confidence=0.3, min_tracking_confidence=0.3) # 降低置信度提升速度
# WebRTC configuration.
# NOTE: the original set "iceTransportPolicy": "relay", but listed only a STUN
# server. "relay" restricts ICE to TURN relay candidates, and with no TURN
# server configured no usable candidate can ever be gathered, so the peer
# connection could never establish. Use the default ("all") policy instead.
rtc_configuration = {
    "iceServers": [{"urls": "stun:stun.l.google.com:19302"}],
}
# Hand-detection callback for the WebRTC stream.
def detection(image, conf_threshold=0.5):
    """Detect hands with MediaPipe Hands and draw their landmarks.

    Args:
        image: BGR frame (NumPy array) from the WebRTC stream.
        conf_threshold: minimum handedness classification score a detected
            hand must reach before its landmarks are drawn. (Previously this
            parameter — wired to the UI slider — was silently ignored.)

    Returns:
        The 640x480 BGR frame annotated with hand landmarks.
    """
    # Downscale FIRST so MediaPipe processes the smaller image. The original
    # converted the full-size frame to RGB and resized only the display copy,
    # so the resize never reduced the processing cost it was added for.
    image = cv2.resize(image, (640, 480))
    # MediaPipe expects RGB input; OpenCV delivers BGR.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = hands.process(image_rgb)
    if results.multi_hand_landmarks:
        # multi_handedness is parallel to multi_hand_landmarks; use its
        # classification score to honor conf_threshold. If the handedness
        # list is missing/short, fall back to drawing unconditionally.
        handedness_list = results.multi_handedness or []
        for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
            if idx < len(handedness_list):
                score = handedness_list[idx].classification[0].score
                if score < conf_threshold:
                    continue
            mp_drawing.draw_landmarks(
                image, hand_landmarks, mp_hands.HAND_CONNECTIONS
            )
    # Landmarks are normalized coordinates, so drawing on the resized frame is correct.
    return image
# Gradio UI.
# Fixes vs. original: "max-height: 600" had no unit (invalid CSS) -> "600px",
# and a stray " |" scrape artifact after demo.launch() was a syntax error.
css = """.my-group {max-width: 600px !important; max-height: 600px !important;}
.my-column {display: flex !important; justify-content: center !important; align-items: center !important;}"""

with gr.Blocks(css=css) as demo:
    gr.HTML(
        """
    <h1 style='text-align: center'>
    Hand Gesture Detection with MediaPipe (Powered by WebRTC ⚡️)
    </h1>
    """
    )
    gr.HTML(
        """
        <h3 style='text-align: center'>
        <a href='https://mediapipe.dev/'>MediaPipe Hands</a>
        </h3>
        """
    )
    with gr.Column(elem_classes=["my-column"]):
        with gr.Group(elem_classes=["my-group"]):
            image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
            conf_threshold = gr.Slider(
                label="Confidence Threshold",
                minimum=0.0,
                maximum=1.0,
                step=0.05,
                value=0.5,
            )
        # Stream frames through detection(); time_limit caps each session at 10 s.
        image.stream(
            fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
        )

if __name__ == "__main__":
    demo.launch()