File size: 2,734 Bytes
0a131b9
f32e001
be7dd52
4bb0527
be7dd52
 
4e5cace
0a131b9
 
 
 
 
 
f32e001
be7dd52
4bb0527
be7dd52
 
 
4bb0527
 
 
 
 
 
 
be7dd52
4e5cace
 
0a131b9
 
 
2fb3db3
4e5cace
5ba7c4e
4e5cace
0a131b9
4e5cace
2fb3db3
4e5cace
 
be7dd52
4e5cace
be7dd52
 
4bb0527
be7dd52
 
 
 
 
 
4bb0527
4e5cace
 
 
4bb0527
be7dd52
 
4bb0527
be7dd52
 
f32e001
4bb0527
 
 
3001c11
4e5cace
4bb0527
f32e001
 
4bb0527
4e5cace
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import os
import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import numpy as np
import requests
import logging
from dotenv import load_dotenv  # Load .env file

# Load environment variables
load_dotenv()
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

# Configure Logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Fail loudly (but non-fatally) if the key is absent: the classifier still works,
# only the treatment-suggestion API calls will fail later.
if not HUGGINGFACE_API_KEY:
    logging.warning("HUGGINGFACE_API_KEY is not set; treatment suggestions will be unavailable.")

# Load Model & Processor
model_name = "linkanjarad/mobilenet_v2_1.0_224-plant-disease-identification"
try:
    processor = AutoImageProcessor.from_pretrained(model_name, use_fast=True)
    model = AutoModelForImageClassification.from_pretrained(model_name)
    logging.info("✅ Model and processor loaded successfully.")
except Exception as e:
    logging.error(f"❌ Failed to load model: {str(e)}")
    # Chain the original exception so the root cause survives in the traceback.
    raise RuntimeError("Failed to load the model. Please check the logs for details.") from e

# Function to Get AI-Powered Treatment Suggestions
def get_treatment_suggestions(disease_name):
    url = "https://api-inference.huggingface.co/models/OpenAGI/agriculture-gpt"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
    data = {"inputs": f"What are the treatment options for {disease_name} in plants?"}

    try:
        response = requests.post(url, headers=headers, json=data)
        if response.status_code == 200:
            return response.json()[0]["generated_text"]
        else:
            return f"API Error: {response.status_code}"
    except Exception as e:
        return "Error retrieving treatment details."

# Define Prediction Function
def predict(image):
    try:
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        inputs = processor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        predicted_label = model.config.id2label[predicted_class_idx]
        
        # Get AI-generated treatment suggestions
        treatment = get_treatment_suggestions(predicted_label)
        
        return f"Predicted Disease: {predicted_label}\nTreatment: {treatment}"
    except Exception as e:
        logging.error(f"Prediction failed: {str(e)}")
        return f"❌ Prediction failed: {str(e)}"

# Gradio Interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy", label="Upload or capture plant image"),
    outputs=gr.Textbox(label="Result"),
    title="AI-Powered Plant Disease Detector",
    description="Upload a plant leaf image to detect diseases and get AI-powered treatment suggestions.",
    allow_flagging="never",
)

# Launch Gradio App
iface.launch()