#!/usr/bin/env python
from __future__ import annotations
import pathlib
import gradio as gr
import mediapipe as mp
import numpy as np
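# Aliases for MediaPipe's drawing utilities, drawing styles, and Pose solution.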
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
TITLE = 'MediaPipe Human Pose Estimation'
DESCRIPTION = 'https://google.github.io/mediapipe/'
def run(image: np.ndarray, model_complexity: int, enable_segmentation: bool,
        min_detection_confidence: float, background_color: str) -> np.ndarray:
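    """Run MediaPipe Pose on one RGB image and return the annotated frame."""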
    with mp_pose.Pose(
            static_image_mode=True,
            model_complexity=model_complexity,
            enable_segmentation=enable_segmentation,
            min_detection_confidence=min_detection_confidence) as pose:
        results = pose.process(image)

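    # MediaPipe's default drawing styles use OpenCV's BGR channel order, so
    # draw on a BGR copy and flip back to RGB just before returning.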
    res = image[:, :, ::-1].copy()
    if enable_segmentation:
        if background_color == 'white':
            bg_color = 255
        elif background_color == 'black':
            bg_color = 0
        elif background_color == 'green':
            bg_color = (0, 255, 0)  # type: ignore
        else:
            raise ValueError(f'Unsupported background color: {background_color}')
        if results.segmentation_mask is not None:
            # Replace pixels the model scored as background (mask <= 0.1).
            res[results.segmentation_mask <= 0.1] = bg_color
        else:
            res[:] = bg_color
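    # Overlay the detected pose skeleton; draw_landmarks is a no-op when
    # no pose was detected (results.pose_landmarks is None).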
    mp_drawing.draw_landmarks(
        res,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
    return res[:, :, ::-1]
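
# model_complexity 0/1/2 selects MediaPipe's Lite/Full/Heavy pose model.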
model_complexities = list(range(3))
background_colors = ['white', 'black', 'green']
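# Example inputs; assumes an images/ directory of *.jpg files next to this script.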
image_paths = sorted(pathlib.Path('images').rglob('*.jpg'))
examples = [[path, model_complexities[1], True, 0.5, background_colors[0]]
            for path in image_paths]
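
# The input widgets below map positionally onto run()'s parameters.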
gr.Interface(
    fn=run,
    inputs=[
        gr.Image(label='Input', type='numpy'),
        gr.Radio(label='Model Complexity',
                 choices=model_complexities,
                 type='index',
                 value=model_complexities[1]),
        gr.Checkbox(label='Enable Segmentation', value=True),
        gr.Slider(label='Minimum Detection Confidence',
                  minimum=0,
                  maximum=1,
                  step=0.05,
                  value=0.5),
        gr.Radio(label='Background Color',
                 choices=background_colors,
                 type='value',
                 value=background_colors[0]),
    ],
    outputs=gr.Image(label='Output', height=500),
    examples=examples,
    title=TITLE,
    description=DESCRIPTION,
).queue().launch()