RizwanMunawar committed · Commit 6501158
Parent(s): 637e0b8

add support for image inference
app.py CHANGED
@@ -1,12 +1,11 @@
 import gradio as gr
 import PIL.Image as Image
-import tempfile
-import cv2
+
 from ultralytics import ASSETS, YOLO
 
-# Load YOLOv8 model
 model = YOLO("yolov8n.pt")
 
+
 def predict_image(img, conf_threshold, iou_threshold):
     """Predicts objects in an image using a YOLOv8 model with adjustable confidence and IOU thresholds."""
     results = model.predict(
@@ -24,74 +23,19 @@ def predict_image(img, conf_threshold, iou_threshold):
 
     return im
 
-def predict_video(video_path, conf_threshold, iou_threshold):
-    """Predicts objects in a video using a YOLOv8 model with adjustable confidence and IOU thresholds."""
-    # Create a temporary file to save the processed video
-    temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
-    temp_output.close()
-
-    # Load video
-    cap = cv2.VideoCapture(video_path)
-
-    # Get video properties
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = int(cap.get(cv2.CAP_PROP_FPS))
-
-    # Set up VideoWriter to save output video
-    out = cv2.VideoWriter(temp_output.name, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
-
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-
-        # Perform inference on each frame
-        results = model.predict(
-            source=frame,
-            conf=conf_threshold,
-            iou=iou_threshold,
-            show_labels=True,
-            show_conf=True,
-            imgsz=640,
-        )
-
-        # Draw the results on the frame
-        for r in results:
-            frame = r.plot()
-
-        # Write the frame to the output video
-        out.write(frame)
-
-    # Release resources
-    cap.release()
-    out.release()
-
-    return temp_output.name
-
-def process_input(input_file, conf_threshold, iou_threshold, mode):
-    """Handles both image and video inference based on the selected mode."""
-    if mode == "Image":
-        return predict_image(input_file, conf_threshold, iou_threshold)
-    elif mode == "Video":
-        return predict_video(input_file.name, conf_threshold, iou_threshold)
-
-# Create Gradio interface
 iface = gr.Interface(
-    fn=process_input,
+    fn=predict_image,
     inputs=[
-        gr.
+        gr.Image(type="pil", label="Upload Image"),
         gr.Slider(minimum=0, maximum=1, value=0.25, label="Confidence threshold"),
         gr.Slider(minimum=0, maximum=1, value=0.45, label="IoU threshold"),
-        gr.Radio(choices=["Image", "Video"], label="Select Mode", value="Image"),
     ],
-    outputs=gr.Image(type="pil", label="Result")
+    outputs=gr.Image(type="pil", label="Result"),
     title="Ultralytics Gradio Application 🚀",
-    description="Upload images
+    description="Upload images for inference. The Ultralytics YOLOv8n model is used by default.",
     examples=[
-        [ASSETS / "bus.jpg", 0.25, 0.45
-        [ASSETS / "zidane.jpg", 0.25, 0.45
+        [ASSETS / "bus.jpg", 0.25, 0.45],
+        [ASSETS / "zidane.jpg", 0.25, 0.45],
     ],
 )
-
 iface.launch(share=True)
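
For reference, a minimal sketch of the same inference call the app makes, run outside the Gradio UI. This is not part of the Space; it assumes the ultralytics and pillow packages are installed locally, that yolov8n.pt is downloaded automatically on first use, and the output file name is illustrative.

import PIL.Image as Image

from ultralytics import ASSETS, YOLO

# Same model and thresholds the app exposes through its sliders.
model = YOLO("yolov8n.pt")
results = model.predict(
    source=Image.open(ASSETS / "bus.jpg"),
    conf=0.25,
    iou=0.45,
    show_labels=True,
    show_conf=True,
    imgsz=640,
)

# plot() returns an annotated BGR numpy array; reverse the channel axis to get RGB for PIL.
annotated = Image.fromarray(results[0].plot()[..., ::-1])
annotated.save("bus_annotated.jpg")  # illustrative output path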