# Gradio WebRTC demo: mirror the webcam stream and draw a marker dot.
# (Pose-landmark detection via MediaPipe is scaffolded below but disabled.)
import gradio as gr
import cv2
from gradio_webrtc import WebRTC
import os
# import mediapipe as mp
# from mediapipe.tasks import python
# from mediapipe.tasks.python import vision, BaseOptions
# from mediapipe import solutions
# from mediapipe.framework.formats import landmark_pb2
import numpy as np
from PIL import Image
# MODEL_PATH = r"pose_landmarker_lite.task"
# Drawing landmarks
# def draw_landmarks_on_image(rgb_image, detection_result):
#     pose_landmarks_list = detection_result.pose_landmarks
#     annotated_image = np.copy(rgb_image)
#     for pose_landmarks in pose_landmarks_list:
#         pose_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
#         pose_landmarks_proto.landmark.extend([
#             landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in pose_landmarks
#         ])
#         solutions.drawing_utils.draw_landmarks(
#             annotated_image,
#             pose_landmarks_proto,
#             solutions.pose.POSE_CONNECTIONS,
#             solutions.drawing_styles.get_default_pose_landmarks_style())
#     return annotated_image
# base_options = python.BaseOptions(delegate=0, model_asset_path=MODEL_PATH)
# options = vision.PoseLandmarkerOptions(
#     base_options=base_options,
#     output_segmentation_masks=True)
# detector = vision.PoseLandmarker.create_from_options(options)
def detection(image, conf_threshold=0.3):
    """Process one webcam frame: mirror it and stamp a marker dot.

    Parameters
    ----------
    image : np.ndarray
        A single frame from the WebRTC stream (H x W x 3 array —
        assumed BGR as delivered to OpenCV; TODO confirm against
        gradio_webrtc's colorspace).
    conf_threshold : float, optional
        Value of the UI confidence slider. Currently unused; kept so the
        `image.stream(inputs=[image, conf_threshold])` wiring keeps
        working and for when pose detection is re-enabled.

    Returns
    -------
    np.ndarray
        The horizontally mirrored frame with a filled dot at (90, 90).
    """
    # Mirror horizontally so the stream behaves like a selfie view.
    frame = cv2.flip(image, 1)
    # Bug fix: the original called cv2.circle(rgb_frame, (90, 90), (255, 0, 0), 20)
    # where `rgb_frame` was never assigned (the cvtColor line was commented
    # out) and the arguments were misplaced. The correct signature is
    # circle(img, center, radius, color[, thickness]); thickness=-1 fills.
    frame = cv2.circle(frame, (90, 90), 20, (255, 0, 0), -1)
    # mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)
    # detection_result = detector.detect(mp_image)
    # annotated_image = draw_landmarks_on_image(mp_image.numpy_view(), detection_result)
    return frame
# Assemble the demo UI: a live WebRTC video widget wired through
# `detection`, plus a confidence slider forwarded as its second argument.
with gr.Blocks() as demo:
    video_stream = WebRTC(
        label="Stream",
        mode="send-receive",
        modality="video",
        height=480,
        width=640,
        time_limit=10,
    )
    threshold_slider = gr.Slider(
        label="Confidence Threshold",
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        value=0.30,
    )
    # Each incoming frame (and the current slider value) is passed to
    # `detection`; its return value is rendered back into the same widget.
    video_stream.stream(
        fn=detection,
        inputs=[video_stream, threshold_slider],
        outputs=[video_stream],
    )

if __name__ == "__main__":
    # share=True publishes a temporary public URL for the demo.
    demo.launch(share=True)