harshitface2003 committed on
Commit 1740d75 · verified · 1 Parent(s): 14bfb32

Update app.py

Files changed (1)
  1. app.py +54 -35
app.py CHANGED
@@ -1,19 +1,60 @@
  import gradio as gr
- from gradio_webrtc import WebRTC
  import cv2
  import os

- def generation():
-     url = 0
-     cap = cv2.VideoCapture(url)
-     iterating = True
-     while iterating:
-         iterating, frame = cap.read()
-         frame = cv2.flip(frame, 1)
-         yield frame

  with gr.Blocks() as demo:
-     image = WebRTC(label="Stream", mode="receive", modality="video", height=480, width=640)
      conf_threshold = gr.Slider(
          label="Confidence Threshold",
          minimum=0.0,
@@ -21,33 +62,11 @@ with gr.Blocks() as demo:
          step=0.05,
          value=0.30,
      )
-     button = gr.Button("Start", variant="primary")
      image.stream(
-         fn=generation, inputs=None, outputs=[image],
-         trigger=button.click
      )
-
-

  if __name__ == "__main__":
      demo.launch()
- # import gradio as gr
- # from gradio_webrtc import WebRTC
-
-
- # with gr.Blocks() as demo:
- #     image = WebRTC(label="Stream", mode="send-receive", modality="video")
- #     conf_threshold = gr.Slider(
- #         label="Confidence Threshold",
- #         minimum=0.0,
- #         maximum=1.0,
- #         step=0.05,
- #         value=0.30,
- #     )
- #     image.stream(
- #         inputs=[image, conf_threshold],
- #         outputs=[image], time_limit=10
- #     )
-
- # if __name__ == "__main__":
- #     demo.launch()
  import gradio as gr
  import cv2
+ from gradio_webrtc import WebRTC
  import os
+ import mediapipe as mp
+ from mediapipe.tasks import python
+ from mediapipe.tasks.python import vision, BaseOptions
+ from mediapipe import solutions
+ from mediapipe.framework.formats import landmark_pb2
+ import numpy as np
+ import cv2
+ from PIL import Image
+
+ MODEL_PATH = r"pose_landmarker_heavy.task"
+
+ # Drawing landmarks
+ def draw_landmarks_on_image(rgb_image, detection_result):
+     pose_landmarks_list = detection_result.pose_landmarks
+     annotated_image = np.copy(rgb_image)
+
+     for pose_landmarks in pose_landmarks_list:
+         pose_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
+         pose_landmarks_proto.landmark.extend([
+             landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in pose_landmarks
+         ])
+         solutions.drawing_utils.draw_landmarks(
+             annotated_image,
+             pose_landmarks_proto,
+             solutions.pose.POSE_CONNECTIONS,
+             solutions.drawing_styles.get_default_pose_landmarks_style())
+     return annotated_image
+
+
+ base_options = python.BaseOptions(delegate=0, model_asset_path=MODEL_PATH)
+ options = vision.PoseLandmarkerOptions(
+     base_options=base_options,
+     output_segmentation_masks=True)
+ detector = vision.PoseLandmarker.create_from_options(options)
+
+
+ def detection(image, conf_threshold=0.3):
+     frame = cv2.flip(image, 1)
+     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_frame)
+
+     # Pose detection
+     detection_result = detector.detect(mp_image)
+
+     # Draw landmarks
+     annotated_image = draw_landmarks_on_image(mp_image.numpy_view(), detection_result)
+
+     return annotated_image

  with gr.Blocks() as demo:
+     image = WebRTC(label="Stream", mode="send-receive", modality="video", height=480, width=640, mirror_webcam=True)
      conf_threshold = gr.Slider(
          label="Confidence Threshold",
          minimum=0.0,
          step=0.05,
          value=0.30,
      )
      image.stream(
+         fn=detection,
+         inputs=[image, conf_threshold],
+         outputs=[image]
      )

  if __name__ == "__main__":
      demo.launch()
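
For reference, a minimal smoke test of the new detection() callback is sketched below. It is not part of the commit; it assumes the updated app.py is importable as app, that pose_landmarker_heavy.task sits next to it (so the module-level detector loads), that gradio, gradio_webrtc, mediapipe, opencv-python, and numpy are installed, and that a webcam is available. detection() takes a BGR frame and returns an RGB numpy array with the pose landmarks drawn on it.

# Hypothetical smoke test, kept separate from app.py: run one webcam frame through detection().
import cv2
from app import detection  # importing app builds the Blocks UI but does not launch it

cap = cv2.VideoCapture(0)  # default webcam
ok, frame = cap.read()     # frame is BGR, as OpenCV returns it
cap.release()

if ok:
    annotated = detection(frame, conf_threshold=0.3)  # RGB array with landmarks drawn
    cv2.imwrite("annotated.png", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))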