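# Gradio app for corn-disease detection with a fine-tuned YOLOv11 model.
# For every detected disease the app also asks DeepSeek (via the OpenRouter API)
# for the likely cause and a suggested treatment.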
import gradio as gr
import matplotlib.pyplot as plt
from PIL import Image
from ultralytics import YOLO
import cv2
import numpy as np
import requests
import json
import os
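
# Fine-tuned YOLO weights and the class names used during training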
model = YOLO('Corn-Disease50Epoch.pt')
name = ['Corn Rust', 'Leaf Blight', 'Gray Leaf Spot', 'Healthy']
image_directory = "/home/user/app/images"
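
# OpenRouter API key, read from the environment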
API_KEY = os.environ.get("API_KEY")
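

# Ask DeepSeek (via OpenRouter) for the cause of and treatment for the given disease.
# The prompts and error messages are in Indonesian, matching the app's UI language.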
def get_deepseek_solution(disease_name):
    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json"
            },
            data=json.dumps({
                "model": "deepseek/deepseek-r1-distill-llama-70b:free",
                "messages": [
                    {
                        "role": "system",
                        "content": "Anda adalah asisten yang hanya dapat memberikan jawaban berdasarkan materi yang diberikan."
                    },
                    {
                        "role": "user",
                        "content": f"Apa penyebab dan solusi penyakit jagung '{disease_name}'?"
                    }
                ]
            })
        )
        if response.status_code == 200:
            result = response.json()
            # Defensively unwrap the first choice; return an empty string if the response shape is unexpected
            return result.get("choices", [{}])[0].get("message", {}).get("content", "").strip()
        else:
            return "DeepSeek gagal memberikan jawaban (kode error: {}).".format(response.status_code)
    except requests.exceptions.RequestException as e:
        return f"Gagal terhubung ke DeepSeek: {e}"
def response2(image, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
    results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
    text = ""
    solution = ""
    detected_diseases = set()
    im = image  # fallback in case predict returns no results
    for r in results:
        # Annotated image: Ultralytics plots in BGR, so reverse the channels for PIL
        im_array = r.plot()
        im = Image.fromarray(im_array[..., ::-1])
        # Collect class, confidence and box centre for every detection
        conf = np.array(r.boxes.conf.cpu())
        cls = np.array(r.boxes.cls.cpu()).astype(int)
        xywh = np.array(r.boxes.xywh.cpu()).astype(int)
        for con, cl, xy in zip(conf, cls, xywh):
            disease_name = name[cl] if cl < len(name) else "Unknown"
            confidence = round(float(con) * 100, 1)
            text += f"Detected {disease_name} with confidence {confidence}% at ({xy[0]},{xy[1]})\n"
            detected_diseases.add(disease_name)
    # Fetch an explanation only once per distinct disease
    explanation_cache = {}
    for disease in detected_diseases:
        if disease.lower() == "healthy":
            solution += f"\n--- {disease} ---\nTanaman tampak sehat. Tidak ada tindakan diperlukan.\n"
        elif disease in name:
            if disease not in explanation_cache:
                explanation_cache[disease] = get_deepseek_solution(disease)
            solution += f"\n--- {disease} ---\n{explanation_cache[disease]}\n"
        else:
            solution += f"\n--- {disease} ---\nJenis penyakit tidak dikenali. Tidak dapat memberikan solusi.\n"
    return im, text.strip(), solution.strip()
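

# Helper: convert a PIL (RGB) image to an OpenCV BGR array.
# Not referenced by the interfaces below, but kept for convenience.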
def pil_to_cv2(pil_image):
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
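

# Generator that reads an uploaded video frame by frame, runs detection on each
# frame, and yields the annotated frame so Gradio can stream the output.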
def process_video(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV reads BGR; flip to RGB before handing the frame to the model
        pil_img = Image.fromarray(frame[..., ::-1])
        result = model.predict(source=pil_img)
        for r in result:
            im_array = r.plot()
            processed_frame = Image.fromarray(im_array[..., ::-1])
            yield processed_frame
    cap.release()
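

# Gradio UI: image-inference controls and example images, plus a video tab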
inputs = [
    gr.Image(type="pil", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="IOU Threshold"),
]
outputs = [
    gr.Image(type="pil", label="Output Image"),
    gr.Textbox(label="Result"),
    gr.Textbox(label="AI-Powered Solution")
]
examples = [
    ["/home/user/app/images/jagung7.jpg", 640, 0.3, 0.6],
    ["/home/user/app/images/jagung4.jpeg", 640, 0.3, 0.6],
    ["/home/user/app/images/jagung6.jpeg", 640, 0.3, 0.6]
]
title = """Corn Diseases Detection Finetuned YOLOv11 <br></br> <a href="https://colab.research.google.com/drive/1vnxtgPKOgfC8nyCL9hjrNFed75StsqGQ?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Colab" style="display:inline-block;"> </a> """
description = 'Image Size: Ukuran gambar untuk inferensi.\nConfidence Threshold: Minimum confidence untuk deteksi.\nIOU Threshold: Threshold untuk Non-Maximum Suppression (NMS).'
video_iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload Video", interactive=True),
    outputs=gr.Image(type="pil", label="Result"),
    title=title,
    description="Upload video untuk deteksi penyakit jagung."
)
image_iface = gr.Interface(
    fn=response2,
    inputs=inputs,
    outputs=outputs,
    examples=examples,
    title=title,
    description=description
)
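
# Combine the image and video interfaces into a single tabbed app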
demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])
if __name__ == '__main__':
    demo.launch()