Update app.py
app.py
CHANGED
Previous version (removed lines are prefixed with "-"):

@@ -1,61 +1,29 @@
 import gradio as gr
 import matplotlib.pyplot as plt
 from PIL import Image
 from ultralyticsplus import YOLO
 import cv2
 import numpy as np
-from transformers import pipeline
 import requests
 from io import BytesIO
 import os

 model = YOLO('Corn-Disease50epoch.pt')
-name = ['Leaf Blight','Corn Rust','Gray Leaf Spot', 'Healthy']
 image_directory = "/home/user/app/images"
-# video_directory = "/home/user/app/video"
-
-# url_example="https://drive.google.com/file/d/1bBq0bNmJ5X83tDWCzdzHSYCdg-aUL4xO/view?usp=drive_link"
-# url_example='https://drive.google.com/uc?id=' + url_example.split('/')[-2]
-# r = requests.get(url_example)
-# im1 = Image.open(BytesIO(r.content))
-
-# url_example="https://drive.google.com/file/d/16Z7QzvZ99fbEPj1sls_jOCJBsC0h_dYZ/view?usp=drive_link"
-# url_example='https://drive.google.com/uc?id=' + url_example.split('/')[-2]
-# r = requests.get(url_example)
-# im2 = Image.open(BytesIO(r.content))
-
-# url_example="https://drive.google.com/file/d/13mjTMS3eR0AKYSbV-Fpb3fTBno_T42JN/view?usp=drive_link"
-# url_example='https://drive.google.com/uc?id=' + url_example.split('/')[-2]
-# r = requests.get(url_example)
-# im3 = Image.open(BytesIO(r.content))
-
-# url_example="https://drive.google.com/file/d/1-XpFsa_nz506Ul6grKElVJDu_Jl3KZIF/view?usp=drive_link"
-# url_example='https://drive.google.com/uc?id=' + url_example.split('/')[-2]
-# r = requests.get(url_example)
-# im4 = Image.open(BytesIO(r.content))
-# for i, r in enumerate(results):
-
-#     # Plot results image
-#     im_bgr = r.plot()
-#     im_rgb = im_bgr[..., ::-1]  # Convert BGR to RGB
-
-
-def response2(image: gr.Image = None, image_size: gr.Slider = 640, conf_threshold: gr.Slider = 0.3, iou_threshold: gr.Slider = 0.6):

     results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
-
     text = ""
     name_weap = ""
     solution = ""
-
-    box = results[0].boxes

     for r in results:
         im_array = r.plot()
         im = Image.fromarray(im_array[..., ::-1])

-
-
     for r in results:
         conf = np.array(r.boxes.conf.cpu())
         cls = np.array(r.boxes.cls.cpu())

@@ -65,9 +33,9 @@ def response2(image: gr.Image = None, image_size: gr.Slider = 640, conf_threshold

     for con, cl, xy in zip(conf, cls, xywh):
         cone = con.astype(float)
-        conef = round(cone,3)
         conef = conef * 100
-        text += (f"Detected {name[cl]} with confidence {round(conef,1)}% at ({xy[0]},{xy[1]})\n")

         if name[cl] == "Corn Rust":
             solution = (f"{solution} Apply fungicides with active ingredients like propiconazole or tebuconazole when symptoms appear.\n")

@@ -75,47 +43,13 @@ def response2(image: gr.Image = None, image_size: gr.Slider = 640, conf_threshold
             solution = (f"{solution} Use fungicides containing strobilurins (e.g., azoxystrobin) or triazoles.\n")
         elif name[cl] == "Leaf Blight":
             solution = (f"{solution} Treat with fungicides such as mancozeb or chlorothalonil during the early stages.\n")
-
-    # xywh = int(results.boxes.xywh)
-    # x = xywh[0]
-    # y = xywh[1]

     return im, text, solution

-
-inputs = [
-    gr.Image(type="pil", label="Input Image"),
-    gr.Slider(minimum=320, maximum=1280, value=640,
-              step=32, label="Image Size"),
-    gr.Slider(minimum=0.0, maximum=1.0, value=0.3,
-              step=0.05, label="Confidence Threshold"),
-    gr.Slider(minimum=0.0, maximum=1.0, value=0.6,
-              step=0.05, label="IOU Threshold"),
-]
-
-outputs = [gr.Image(type="pil", label="Output Image"),
-           gr.Textbox(label="Result"), gr.Textbox(label="Solution")
-          ]
-
-examples = [
-    ["/home/user/app/images/jagung7.jpg", 640, 0.3, 0.6],
-    ["/home/user/app/images/jagung4.jpeg", 640, 0.3, 0.6],
-    ["/home/user/app/images/jagung6.jpeg", 640, 0.3, 0.6]
-]
-
-title = """Corn Diseases Detection Finetuned YOLOv11
-<br></br>
-<a href="https://colab.research.google.com/drive/1vnxtgPKOgfC8nyCL9hjrNFed75StsqGQ?usp=sharing">
-<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Colab" style="display:inline-block;">
-</a> """
-description = 'Image Size: Defines the image size for inference.\nConfidence Treshold: Sets the minimum confidence threshold for detections.\nIOU Treshold: Intersection Over Union (IoU) threshold for Non-Maximum Suppression (NMS). Useful for reducing duplicates.'
-
-
 def pil_to_cv2(pil_image):
     open_cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
     return open_cv_image

-
 def process_video(video_path):
     cap = cv2.VideoCapture(video_path)

@@ -132,22 +66,44 @@ def process_video(video_path)
         yield processed_frame
     cap.release()


 video_iface = gr.Interface(
     fn=process_video,
-    inputs=
-
-    ],
-    outputs=gr.Image(type="pil", label="Result"),
     title=title,
-    description="Upload video for inference."
-    # examples=[[os.path.join(video_directory, "ExampleRifle.mp4")],
-    #           [os.path.join(video_directory, "Knife.mp4")],
-    #          ]
 )

-
-

 demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])
New version (added lines are prefixed with "+"; unchanged lines outside the diff context are marked "..."):

+
 import gradio as gr
 import matplotlib.pyplot as plt
 from PIL import Image
 from ultralyticsplus import YOLO
 import cv2
 import numpy as np
 import requests
 from io import BytesIO
 import os

 model = YOLO('Corn-Disease50epoch.pt')
+name = ['Leaf Blight', 'Corn Rust', 'Gray Leaf Spot', 'Healthy']
 image_directory = "/home/user/app/images"

+def response2(image, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
     results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
+
     text = ""
     name_weap = ""
     solution = ""

     for r in results:
         im_array = r.plot()
         im = Image.fromarray(im_array[..., ::-1])

     for r in results:
         conf = np.array(r.boxes.conf.cpu())
         cls = np.array(r.boxes.cls.cpu())

 ...  (lines 30-32 unchanged, not shown in this diff)

     for con, cl, xy in zip(conf, cls, xywh):
         cone = con.astype(float)
+        conef = round(cone, 3)
         conef = conef * 100
+        text += (f"Detected {name[cl]} with confidence {round(conef, 1)}% at ({xy[0]},{xy[1]})\n")

         if name[cl] == "Corn Rust":
             solution = (f"{solution} Apply fungicides with active ingredients like propiconazole or tebuconazole when symptoms appear.\n")

 ...  (line 42 unchanged, not shown in this diff)

             solution = (f"{solution} Use fungicides containing strobilurins (e.g., azoxystrobin) or triazoles.\n")
         elif name[cl] == "Leaf Blight":
             solution = (f"{solution} Treat with fungicides such as mancozeb or chlorothalonil during the early stages.\n")

     return im, text, solution
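Called directly (outside Gradio), response2 takes a PIL image plus the three numeric settings and returns the annotated image, the detection text, and the treatment suggestions. A minimal check of that contract, assuming it runs where app.py's model and response2 are available and that the bundled example image exists at the path used in the examples list:

# Illustrative sketch only; the image path is taken from the examples list in
# app.py and is assumed to exist in the running Space.
from PIL import Image

img = Image.open("/home/user/app/images/jagung7.jpg")
annotated, detections, advice = response2(img, image_size=640,
                                           conf_threshold=0.3, iou_threshold=0.6)
annotated.save("annotated.jpg")   # image with the predicted boxes drawn
print(detections)                 # "Detected <class> with confidence ...%" lines
print(advice)                     # accumulated fungicide recommendations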
New version, continued:

 def pil_to_cv2(pil_image):
     open_cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
     return open_cv_image

 def process_video(video_path):
     cap = cv2.VideoCapture(video_path)

 ...  (lines 56-65 unchanged, not shown in this diff)

         yield processed_frame
     cap.release()
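The loop inside process_video falls outside the changed hunks, so only its tail (yield processed_frame and cap.release()) is visible above. Purely as a hypothetical sketch of such a frame loop, assuming it reuses the module-level model, and not as the file's actual implementation:

# Hypothetical reconstruction for illustration; the real body of process_video
# is not part of this diff and may differ.
def process_video_sketch(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()                 # OpenCV returns frames as BGR arrays
        if not ret:
            break
        results = model.predict(frame, conf=0.3, iou=0.6, imgsz=640)
        annotated_bgr = results[0].plot()       # draw the predicted boxes on the frame
        processed_frame = Image.fromarray(annotated_bgr[..., ::-1])  # BGR -> RGB PIL image
        yield processed_frame
    cap.release()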
New version, continued:

+inputs = [
+    gr.Image(type="pil", label="Input Image"),
+    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Confidence Threshold"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="IOU Threshold"),
+]
+
+outputs = [
+    gr.Image(type="pil", label="Output Image"),
+    gr.Textbox(label="Result"),
+    gr.Textbox(label="Solution")
+]
+
+examples = [
+    ["/home/user/app/images/jagung7.jpg", 640, 0.3, 0.6],
+    ["/home/user/app/images/jagung4.jpeg", 640, 0.3, 0.6],
+    ["/home/user/app/images/jagung6.jpeg", 640, 0.3, 0.6]
+]
+
+title = """Corn Diseases Detection Finetuned YOLOv11 <br></br> <a href="https://colab.research.google.com/drive/1vnxtgPKOgfC8nyCL9hjrNFed75StsqGQ?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Colab" style="display:inline-block;"> </a> """
+description = 'Image Size: Defines the image size for inference.\nConfidence Threshold: Sets the minimum confidence threshold for detections.\nIOU Threshold: Intersection over Union (IoU) threshold for Non-Maximum Suppression (NMS); useful for reducing duplicate detections.'

 video_iface = gr.Interface(
     fn=process_video,
+    inputs=gr.Video(label="Upload Video", interactive=True),
+    outputs=gr.Image(type="pil", label="Result"),
     title=title,
+    description="Upload video for inference."
 )

+image_iface = gr.Interface(
+    fn=response2,
+    inputs=inputs,
+    outputs=outputs,
+    examples=examples,
+    title=title,
+    description=description
+)

 demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])
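The shown context ends at the TabbedInterface, so the commit does not display how the demo is started. A Gradio Space conventionally ends such a file with a launch call; the following is an assumption about that final step, not part of this diff:

# Assumed entry point (not shown in this commit's context lines).
if __name__ == "__main__":
    demo.launch()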