# -*- coding: utf-8 -*-
"""app.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1WeNkl1pYnT0qeOTsUFooLFLJ1arRHC00
"""
# %pip install ultralytics -q
# %pip install gradio -q
import cv2
import os
import PIL.Image as Image
import gradio as gr
import numpy as np
from ultralytics import YOLO
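# a small guard (an addition, not in the original notebook): fail fast with a clear
# message if the trained weights are missing, since YOLO("best.pt") raises a less
# direct error when the file is absent
if not os.path.exists("best.pt"):
    raise FileNotFoundError("best.pt not found; place the trained weights next to this script")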
# load trained model
model = YOLO("best.pt")
# image inference function: run the model on one image and return an annotated PIL image
def predict_image(img, conf_threshold, iou_threshold):
    results = model.predict(
        source=img,
        conf=conf_threshold,
        iou=iou_threshold,
        show_labels=True,
        show_conf=True,
        imgsz=640,
    )
    for r in results:
        im_array = r.plot()  # annotated image as a BGR numpy array
        im = Image.fromarray(im_array[..., ::-1])  # flip BGR -> RGB for PIL
    return im
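# illustrative usage outside Gradio (hypothetical local file, not part of the app flow):
# annotated = predict_image(Image.open("image/fire_image_1.jpg"), 0.25, 0.45)
# annotated.save("annotated.jpg")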
# directory for examples
image_directory = "/home/user/app/image"
video_directory = "/home/user/app/video"
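# note: the absolute paths above assume the Hugging Face Spaces layout (/home/user/app);
# relative paths such as "image" and "video" would serve the same purpose when run locally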
# gradio interface settings for image inference
image_iface = gr.Interface(
    fn=predict_image,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Slider(minimum=0, maximum=1, value=0.25, label="Confidence threshold"),
        gr.Slider(minimum=0, maximum=1, value=0.45, label="IoU threshold"),
    ],
    outputs=gr.Image(type="pil", label="Result"),
    title="Fire Detection using YOLOv8n on Gradio",
    description="Upload images for inference. The Ultralytics YOLOv8n trained model is used for this.",
    examples=[
        [os.path.join(image_directory, "fire_image_1.jpg"), 0.25, 0.45],
        [os.path.join(image_directory, "fire_image_3.jpg"), 0.25, 0.45],
    ],
)
# convert a PIL RGB image to an OpenCV BGR numpy array
def pil_to_cv2(pil_image):
    open_cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    return open_cv_image
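# quick sanity check of the channel flip (illustrative only):
# red_rgb = Image.fromarray(np.full((1, 1, 3), (255, 0, 0), dtype=np.uint8))
# pil_to_cv2(red_rgb)[0, 0]  # -> array([  0,   0, 255], dtype=uint8)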
# process video: run inference frame by frame, then compile the annotated frames
# back into a file (gr.Video expects a file path as output)
def process_video(video_path):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back to 25 fps if the container reports none
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    processed_frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # the model accepts PIL images; cv2 frames are BGR, so flip channels to RGB first
        pil_img = Image.fromarray(frame[..., ::-1])
        result = model.predict(source=pil_img)
        for r in result:
            im_array = r.plot()  # annotated frame as a BGR numpy array
            processed_frames.append(Image.fromarray(im_array[..., ::-1]))  # store as RGB PIL image
    cap.release()
    # compile the frames back into a video with cv2 ("processed_video.mp4" is a filename chosen here)
    output_path = "processed_video.mp4"
    writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    for frame in processed_frames:
        writer.write(pil_to_cv2(frame))  # convert back to BGR for OpenCV
    writer.release()
    return output_path
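# illustrative usage (hypothetical path): returns the path of the annotated clip
# out_path = process_video("video/video_fire_1.mp4")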
# gradio interface settings for video inference
video_iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(label="Upload Video", interactive=True),
    ],
    outputs=gr.Video(label="Result"),
    title="Fire Detection using YOLOv8n on Gradio",
    description="Upload a video for inference. The Ultralytics YOLOv8n trained model is used for inference.",
    examples=[
        [os.path.join(video_directory, "video_fire_1.mp4")],
        [os.path.join(video_directory, "video_fire_2.mp4")],
    ],
)
demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])
if __name__ == '__main__':
    demo.launch()
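    # demo.launch(share=True) would additionally expose a temporary public URL
    # (optional; handy when running in Colab rather than on a hosted Space)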