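# Gradio app for corn leaf disease detection with a fine-tuned YOLO model.
# Detections are paired with treatment advice fetched from DeepSeek via the OpenRouter API.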
import gradio as gr
import matplotlib.pyplot as plt
from PIL import Image
from ultralytics import YOLO
import cv2
import numpy as np
import requests
import json
import os
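# Fine-tuned detection weights and the class names the model was trained to predict.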
model = YOLO('Corn-Disease50Epoch.pt')
name = ['Corn Rust', 'Leaf Blight', 'Gray Leaf Spot', 'Healthy']
image_directory = "/home/user/app/images"
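# OpenRouter API key, read from the environment (e.g. a Hugging Face Space secret).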
API_KEY = os.environ.get("API_KEY")
def get_deepseek_solution(disease_name):
    """Ask DeepSeek (via OpenRouter) for the cause of and treatment for the given disease."""
    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json"
            },
            data=json.dumps({
                "model": "deepseek/deepseek-r1-distill-llama-70b:free",
                "messages": [
                    {
                        "role": "system",
                        "content": "Anda adalah asisten yang hanya dapat memberikan jawaban berdasarkan materi yang diberikan."
                    },
                    {
                        "role": "user",
                        "content": f"Apa penyebab dan solusi penyakit jagung '{disease_name}'?"
                    }
                ]
            })
        )
        if response.status_code == 200:
            result = response.json()
            return result.get("choices", [{}])[0].get("message", {}).get("content", "").strip()
        else:
            return "DeepSeek gagal memberikan jawaban (kode error: {}).".format(response.status_code)
    except requests.exceptions.RequestException as e:
        return f"Gagal terhubung ke DeepSeek: {e}"
def response2(image, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
    """Run YOLO inference on an image; return the annotated image, detection text, and advice."""
    results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
    text = ""
    solution = ""
    detected_diseases = set()
    for r in results:
        # r.plot() returns a BGR array, so reverse the channels before building the PIL image.
        im_array = r.plot()
        im = Image.fromarray(im_array[..., ::-1])
        conf = np.array(r.boxes.conf.cpu())
        cls = np.array(r.boxes.cls.cpu()).astype(int)
        xywh = np.array(r.boxes.xywh.cpu()).astype(int)
        for con, cl, xy in zip(conf, cls, xywh):
            disease_name = name[cl] if cl < len(name) else "Unknown"
            confidence = round(float(con) * 100, 1)
            # xy holds the box centre (x, y) followed by width and height.
            text += f"Detected {disease_name} with confidence {confidence}% at ({xy[0]},{xy[1]})\n"
            detected_diseases.add(disease_name)
    # Fetch an explanation only once per detected disease.
    explanation_cache = {}
    for disease in detected_diseases:
        if disease.lower() == "healthy":
            solution += f"\n--- {disease} ---\nTanaman tampak sehat. Tidak ada tindakan diperlukan.\n"
        elif disease in name:
            if disease not in explanation_cache:
                explanation_cache[disease] = get_deepseek_solution(disease)
            solution += f"\n--- {disease} ---\n{explanation_cache[disease]}\n"
        else:
            solution += f"\n--- {disease} ---\nJenis penyakit tidak dikenali. Tidak dapat memberikan solusi.\n"
    return im, text.strip(), solution.strip()
def pil_to_cv2(pil_image):
    """Convert a PIL RGB image to an OpenCV BGR array (helper, currently unused)."""
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
def process_video(video_path):
    """Read a video frame by frame, run YOLO on each frame, and stream annotated frames back."""
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV reads frames as BGR; flip the channels so the model sees RGB.
        pil_img = Image.fromarray(frame[..., ::-1])
        result = model.predict(source=pil_img)
        for r in result:
            im_array = r.plot()
            processed_frame = Image.fromarray(im_array[..., ::-1])
            yield processed_frame
    cap.release()
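# Gradio components for the image-inference tab, plus bundled example images.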
inputs = [
    gr.Image(type="pil", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="IOU Threshold"),
]
outputs = [
    gr.Image(type="pil", label="Output Image"),
    gr.Textbox(label="Result"),
    gr.Textbox(label="AI-Powered Solution")
]
examples = [
    ["/home/user/app/images/jagung7.jpg", 640, 0.3, 0.6],
    ["/home/user/app/images/jagung4.jpeg", 640, 0.3, 0.6],
    ["/home/user/app/images/jagung6.jpeg", 640, 0.3, 0.6]
]
title = """Corn Diseases Detection Finetuned YOLOv11 <br></br> <a href="https://colab.research.google.com/drive/1vnxtgPKOgfC8nyCL9hjrNFed75StsqGQ?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Colab" style="display:inline-block;"> </a> """
description = 'Image Size: Ukuran gambar untuk inferensi.\nConfidence Threshold: Minimum confidence untuk deteksi.\nIOU Threshold: Threshold untuk Non-Maximum Suppression (NMS).'
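# Two tabs: single-image inference with adjustable thresholds, and frame-by-frame video inference.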
video_iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload Video", interactive=True),
    outputs=gr.Image(type="pil", label="Result"),
    title=title,
    description="Upload video untuk deteksi penyakit jagung."
)
image_iface = gr.Interface(
    fn=response2,
    inputs=inputs,
    outputs=outputs,
    examples=examples,
    title=title,
    description=description
)
demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])
if __name__ == '__main__':
    demo.launch()