# NOTE(review): the following lines were page-scrape residue from the Hugging
# Face Spaces file viewer (status text, file size, git blame hashes, and the
# line-number gutter). They were not Python and broke the module at import
# time, so they have been reduced to this comment.
import gradio as gr
import cv2
from gradio_webrtc import WebRTC
import os
# import mediapipe as mp
# from mediapipe.tasks import python
# from mediapipe.tasks.python import vision, BaseOptions
# from mediapipe import solutions
# from mediapipe.framework.formats import landmark_pb2
import numpy as np
import cv2
from PIL import Image
# MODEL_PATH = r"pose_landmarker_lite.task"
# Drawing landmarks
# def draw_landmarks_on_image(rgb_image, detection_result):
# pose_landmarks_list = detection_result.pose_landmarks
# annotated_image = np.copy(rgb_image)
# for pose_landmarks in pose_landmarks_list:
# pose_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
# pose_landmarks_proto.landmark.extend([
# landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in pose_landmarks
# ])
# solutions.drawing_utils.draw_landmarks(
# annotated_image,
# pose_landmarks_proto,
# solutions.pose.POSE_CONNECTIONS,
# solutions.drawing_styles.get_default_pose_landmarks_style())
# return annotated_image
# base_options = python.BaseOptions(delegate=0,model_asset_path=MODEL_PATH)
# options = vision.PoseLandmarkerOptions(
# base_options=base_options,
# output_segmentation_masks=True)
# detector = vision.PoseLandmarker.create_from_options(options)
def detection(image, conf_threshold=0.3):
    """Process one incoming video frame: mirror it and draw a marker circle.

    Args:
        image: Single video frame as a numpy array, as delivered by
            gradio_webrtc. Presumably BGR channel order (OpenCV convention)
            — TODO confirm against the WebRTC source.
        conf_threshold: Confidence threshold for pose detection. Currently
            unused because the mediapipe pose pipeline above is commented
            out; kept so the gradio `inputs=[image, conf_threshold]` wiring
            keeps working.

    Returns:
        The horizontally flipped frame with a circle drawn at (90, 90).
    """
    # Mirror horizontally so the stream behaves like a selfie view.
    frame = cv2.flip(image, 1)
    # Bug fix: the original referenced `rgb_frame`, which was never defined
    # (the cvtColor line was commented out) and so raised NameError on every
    # frame. It also passed cv2.circle arguments out of order — the color
    # tuple sat where the integer radius belongs. Correct signature:
    # cv2.circle(img, center, radius, color, thickness).
    # NOTE(review): the original's trailing `20` was ambiguous (radius vs
    # thickness); interpreted here as radius — confirm intent.
    frame = cv2.circle(frame, (90, 90), 20, (255, 0, 0), 2)
    # The mediapipe detect/draw steps (see commented block above) would go
    # here once the pose pipeline is re-enabled.
    return frame
# Assemble the Gradio UI: a WebRTC video widget plus a confidence slider,
# with each incoming frame routed through `detection` and streamed back.
with gr.Blocks() as demo:
    video_stream = WebRTC(
        label="Stream",
        mode="send-receive",
        modality="video",
        height=480,
        width=640,
        time_limit=10,
    )
    threshold_slider = gr.Slider(
        label="Confidence Threshold",
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        value=0.30,
    )
    # The widget is both the input and the output: frames go in, processed
    # frames come back on the same stream.
    video_stream.stream(
        fn=detection,
        inputs=[video_stream, threshold_slider],
        outputs=[video_stream],
    )

if __name__ == "__main__":
    demo.launch(share=True)
# (removed stray "|" left over from the scraped page's line-number gutter)