Update app.py
app.py
CHANGED
@@ -14,12 +14,13 @@ import cv2
 import os
 import PIL.Image as Image
 import gradio as gr
-
-from ultralytics import
+import numpy as np
+from ultralytics import YOLO
 
 # load trained model
 model = YOLO("best.pt")
 
+# image inference function
 def predict_image(img, conf_threshold, iou_threshold):
     results = model.predict(
         source=img,
@@ -36,9 +37,12 @@ def predict_image(img, conf_threshold, iou_threshold):
 
     return im
 
-
+# directory for examples
+image_directory = "/home/user/app/image"
+video_directory = "/home/user/app/video"
 
-iface = gr.Interface(
+# interface gradio setting for image
+image_iface = gr.Interface(
     fn=predict_image,
     inputs=[
         gr.Image(type="pil", label="Upload Image"),
@@ -49,11 +53,58 @@ iface = gr.Interface(
     title="Fire Detection using YOLOv8n on Gradio",
     description="Upload images for inference. The Ultralytics YOLOv8n trained model is used for this.",
     examples=[
-        [os.path.join(
-        [os.path.join(
+        [os.path.join(image_directory, "fire_image_1.jpg"), 0.25, 0.45],
+        [os.path.join(image_directory, "fire_image_3.jpg"), 0.25, 0.45],
 
     ]
 )
 
+# convert PIL image objects to numpy arrays
+def pil_to_cv2(pil_image):
+    open_cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
+    return open_cv_image
+
+# process video, convert frame to PIL image
+def process_video(video_path):
+    cap = cv2.VideoCapture(video_path)
+    processed_frames = []
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        # Model expects PIL Image format
+        pil_img = Image.fromarray(frame[..., ::-1])  # Convert BGR to RGB
+        result = model.predict(source=pil_img)
+        for r in result:
+            im_array = r.plot()
+            processed_frames.append(Image.fromarray(im_array[..., ::-1]))  # Convert BGR back to RGB
+    cap.release()
+    # You may choose to display each frame or compile them back using cv2 or a similar library
+    # Display the processed frames
+    for frame in processed_frames:
+        cv2.imshow("frame", pil_to_cv2(frame))
+        if cv2.waitKey(25) & 0xFF == ord('q'):
+            break
+    cv2.destroyAllWindows()
+    # return processed_frames[-1]  # Example, returning the last processed frame
+
+# interface setting for video
+video_iface = gr.Interface(
+    fn=process_video,
+    inputs=[
+        gr.Video(label="Upload Video", interactive=True)
+    ],
+    outputs=gr.Video(label="Result"),
+    title="Fire Detection using YOLOv8n on Gradio",
+    description="Upload video for inference. The Ultralytics YOLOv8n trained model is used for inference.",
+    examples=[
+        [os.path.join(video_directory, "video_fire_1.jpg")],
+        [os.path.join(video_directory, "video_fire_2.jpg")],
+    ]
+)
+
+
+demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])
+
 if __name__ == '__main__':
-    iface.launch()
+    demo.launch()
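The second hunk elides the unchanged body of predict_image. For readers following along, a typical Ultralytics implementation of this function looks like the sketch below; the exact keyword arguments this app passes are not visible in the diff, so they are assumptions:

    # Hypothetical sketch of the elided predict_image body (not part of the commit).
    # conf and iou are documented parameters of Ultralytics' model.predict();
    # model and Image come from the module scope shown in the diff.
    def predict_image(img, conf_threshold, iou_threshold):
        results = model.predict(
            source=img,
            conf=conf_threshold,  # confidence threshold from the Gradio slider
            iou=iou_threshold,    # IoU threshold for non-max suppression
        )
        for r in results:
            im_array = r.plot()                        # annotated image, BGR numpy array
            im = Image.fromarray(im_array[..., ::-1])  # BGR -> RGB for PIL
        return im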
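The channel-order conversions in pil_to_cv2 and process_video are easy to get backwards: OpenCV (cv2.VideoCapture frames, r.plot() output) uses BGR order, while PIL uses RGB, so every crossing needs a flip, via either cv2.cvtColor or the [..., ::-1] slice. A minimal, self-contained round-trip check, reusing pil_to_cv2 as defined in the commit:

    import cv2
    import numpy as np
    import PIL.Image as Image

    def pil_to_cv2(pil_image):
        # Same helper as in the commit: RGB (PIL) -> BGR (OpenCV)
        return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

    # A pure-red pixel in PIL's RGB order ends up in the last
    # channel once converted to OpenCV's BGR order.
    rgb = Image.fromarray(np.array([[[255, 0, 0]]], dtype=np.uint8))
    assert pil_to_cv2(rgb)[0, 0].tolist() == [0, 0, 255]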
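As committed, process_video displays frames with cv2.imshow and returns nothing, while video_iface declares outputs=gr.Video(label="Result"); cv2.imshow also requires a desktop display, which a hosted Space does not have. One way to reconcile the two, sketched here as a suggestion rather than taken from the commit, is to write the annotated frames to a file and return its path, which gr.Video can play (the mp4v codec and the 25 fps fallback are assumptions):

    # Suggested alternative (not part of the commit): write annotated frames
    # to an mp4 and return its path so gr.Video can display the result.
    # model is the YOLO instance loaded at module scope in the diff.
    import cv2

    def process_video(video_path, out_path="result.mp4"):
        cap = cv2.VideoCapture(video_path)
        fps = cap.get(cv2.CAP_PROP_FPS) or 25.0   # fall back when fps is unreported
        writer = None
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = model.predict(source=frame)  # Ultralytics accepts BGR numpy frames
            annotated = results[0].plot()          # annotated frame, BGR numpy array
            if writer is None:
                h, w = annotated.shape[:2]
                writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
            writer.write(annotated)
        cap.release()
        if writer is not None:
            writer.release()
        return out_path

Returning a filesystem path is the form of function output that gr.Video accepts, so this variant works unchanged with the video_iface definition above.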