File size: 1,948 Bytes
d7de9f0
 
95eae85
d7de9f0
 
 
 
 
95eae85
 
 
 
d7de9f0
95eae85
 
d7de9f0
95eae85
d7de9f0
 
95eae85
 
 
d7de9f0
 
95eae85
 
 
 
 
 
 
 
 
 
d7de9f0
 
 
 
 
 
 
 
336643a
d7de9f0
 
336643a
d7de9f0
336643a
d7de9f0
336643a
d7de9f0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import gradio as gr
import os
from hp.yolo_results import YOLOResults
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from PIL import Image
import io
from functools import lru_cache
import logging
from ultralytics import YOLO
from hp.utils import load_resize_image

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

model_id = os.path.abspath("yolo-human-parse-v2.pt")


@lru_cache
def get_model(model_id=model_id):
    """Load the YOLO segmentation model, caching it per model path.

    ``lru_cache`` ensures the (expensive) model construction happens only
    once for a given ``model_id``; later calls return the same instance.
    """
    model = YOLO(model_id, task="segment")
    return model


def perform_segmentation(image):
    """Run human-parse segmentation on *image* and return a visualization.

    The image is resized so its longest side is at most 1024 px, fed to the
    cached YOLO model with retina masks enabled, and the detections are
    rendered by ``YOLOResults``. If the model returns nothing, the (resized)
    input image is returned unchanged.
    """
    model = get_model()
    image = load_resize_image(image, 1024)
    # Use the resized image's longest side as the inference size so the
    # model sees the image at (near) native resolution.
    imgsz = max(image.size)
    result = model(image, imgsz=imgsz, retina_masks=True)
    # NOTE(review): ultralytics typically returns a one-element list even
    # when nothing is detected, so this emptiness guard may never trigger —
    # confirm whether a per-result mask/box check is intended here.
    if not result:
        logger.info("No Masks or Boxes Found")
        return image
    result = YOLOResults(image, result)
    return result.visualize(return_image=True)


# Collect example images shipped with the app. The extension check is
# case-insensitive so files like "photo.JPG" or "person.PNG" are not
# silently skipped (the original check matched lowercase suffixes only).
example_images = [
    os.path.join("sample_images", f)
    for f in os.listdir("sample_images")
    if f.lower().endswith((".png", ".jpg", ".jpeg"))
]
# body_labels = ["hair", "face", "arm", "hand", "leg", "foot", "outfit"]

# Build the Gradio UI: an input image + button on the left, the segmentation
# result on the right, plus cached example images wired to the same function.
with gr.Blocks() as demo:
    gr.Markdown("# YOLO Human Parse")
    gr.Markdown(
        "Upload an image of a person or select an example to see the YOLO segmentation results."
    )
    gr.Markdown("Labels: hair, face, arm, hand, leg, foot, outfit")

    with gr.Row():
        with gr.Column():
            # PIL input so perform_segmentation receives a PIL.Image directly.
            input_image = gr.Image(type="pil", label="Input Image", height=512)
            segment_button = gr.Button("Perform Segmentation")

        output_image = gr.Image(label="Segmentation Result")

    # cache_examples=True runs perform_segmentation on each example at
    # startup and stores the outputs, so example clicks are instant.
    gr.Examples(
        examples=example_images,
        inputs=input_image,
        outputs=output_image,
        fn=perform_segmentation,
        cache_examples=True,
    )

    # Manual trigger for user-uploaded images.
    segment_button.click(
        fn=perform_segmentation,
        inputs=input_image,
        outputs=output_image,
    )

# Start the Gradio server (blocking call).
demo.launch()