import gradio as gr
import numpy
from PIL import Image

import sahi.predict
import sahi.slicing
import sahi.utils.cv
import sahi.utils.file
from sahi import AutoDetectionModel
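
# Inference resolution passed to the detector via `image_size`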
IMAGE_SIZE = 640
# Download example images for the demo
sahi.utils.file.download_from_url(
    "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
    "apple_tree.jpg",
)
sahi.utils.file.download_from_url(
    "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
    "highway.jpg",
)
sahi.utils.file.download_from_url(
    "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
    "highway2.jpg",
)
sahi.utils.file.download_from_url(
    "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
    "highway3.jpg",
)
# Load the YOLOv5s6 checkpoint through SAHI's AutoDetectionModel wrapper
model = AutoDetectionModel.from_pretrained(
    model_type="yolov5",
    model_path="yolov5s6.pt",
    device="cpu",
    confidence_threshold=0.5,
    image_size=IMAGE_SIZE,
)
def sahi_yolo_inference(
    image,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.2,
    overlap_width_ratio=0.2,
    postprocess_type="NMS",
    postprocess_match_metric="IOU",
    postprocess_match_threshold=0.5,
    postprocess_class_agnostic=False,
):
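    """Run plain YOLOv5 inference and SAHI sliced inference on the same image.

    Returns a pair of PIL images: the predictions from standard (full-image)
    inference and the predictions from sliced inference with the given slice
    size, overlap ratios, and postprocess settings.
    """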
    image_width, image_height = image.size

    # Compute the slice grid up front so requests that would generate too many
    # slices for the CPU-only Space can be rejected early.
    sliced_bboxes = sahi.slicing.get_slice_bboxes(
        image_height,
        image_width,
        int(slice_height),
        int(slice_width),
        False,
        overlap_height_ratio,
        overlap_width_ratio,
    )
    if len(sliced_bboxes) > 60:
        raise ValueError(
            f"{len(sliced_bboxes)} slices are too many for Hugging Face Spaces, try a larger slice size."
        )

    # Standard (full-image) inference
    prediction_result_1 = sahi.predict.get_prediction(
        image=image, detection_model=model
    )
    visual_result_1 = sahi.utils.cv.visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=prediction_result_1.object_prediction_list,
    )
    output_1 = Image.fromarray(visual_result_1["image"])

    # Sliced inference with SAHI
    prediction_result_2 = sahi.predict.get_sliced_prediction(
        image=image,
        detection_model=model,
        slice_height=int(slice_height),
        slice_width=int(slice_width),
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
        postprocess_type=postprocess_type,
        postprocess_match_metric=postprocess_match_metric,
        postprocess_match_threshold=postprocess_match_threshold,
        postprocess_class_agnostic=postprocess_class_agnostic,
    )
    visual_result_2 = sahi.utils.cv.visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=prediction_result_2.object_prediction_list,
    )
    output_2 = Image.fromarray(visual_result_2["image"])

    return output_1, output_2
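
# Gradio input components, in the same order as the parameters of sahi_yolo_inference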
inputs = [
    gr.Image(type="pil", label="Original Image"),
    gr.Number(value=512, label="slice_height"),
    gr.Number(value=512, label="slice_width"),
    gr.Number(value=0.2, label="overlap_height_ratio"),
    gr.Number(value=0.2, label="overlap_width_ratio"),
    gr.Dropdown(
        ["NMS", "GREEDYNMM"],
        type="value",
        value="NMS",
        label="postprocess_type",
    ),
    gr.Dropdown(
        ["IOU", "IOS"], type="value", value="IOU", label="postprocess_match_metric"
    ),
    gr.Number(value=0.5, label="postprocess_match_threshold"),
    gr.Checkbox(value=True, label="postprocess_class_agnostic"),
]
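
# Side-by-side outputs: plain YOLOv5s predictions vs. YOLOv5s + SAHI sliced inference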
outputs = [
    gr.Image(type="pil", label="YOLOv5s"),
    gr.Image(type="pil", label="YOLOv5s + SAHI"),
]
title = "Small Object Detection with SAHI + YOLOv5"
description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click one of the example images to try it."
article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large-scale object detection and instance segmentation. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a></p>"
examples = [
    ["apple_tree.jpg", 256, 256, 0.2, 0.2, "NMS", "IOU", 0.4, True],
    ["highway.jpg", 256, 256, 0.2, 0.2, "NMS", "IOU", 0.4, True],
    ["highway2.jpg", 512, 512, 0.2, 0.2, "NMS", "IOU", 0.4, True],
    ["highway3.jpg", 512, 512, 0.2, 0.2, "NMS", "IOU", 0.4, True],
]
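
# Build and launch the demo; cache_examples precomputes the example outputs
# so they load instantly in the UI.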
gr.Interface(
    sahi_yolo_inference,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    theme="huggingface",
    cache_examples=True,
).launch(debug=True, enable_queue=True)