import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import numpy as np
import logging
import requests
import os
# Configure Logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Load Model & Processor
model_name = "linkanjarad/mobilenet_v2_1.0_224-plant-disease-identification"
try:
    processor = AutoImageProcessor.from_pretrained(model_name, use_fast=True)
    model = AutoModelForImageClassification.from_pretrained(model_name)
    logging.info("✅ Model and processor loaded successfully.")
except Exception as e:
    logging.error(f"❌ Failed to load model: {str(e)}")
    raise RuntimeError("Failed to load the model. Please check the logs for details.")
# Gemini API key (read from the GEMINI_API_KEY environment variable / Space secret; never hard-code it)
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
# Function to Get AI-Powered Treatment Suggestions
def get_treatment_suggestions(disease_name):
    url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key={GEMINI_API_KEY}"
    headers = {"Content-Type": "application/json"}
    data = {
        "contents": [
            {"parts": [{"text": f"Provide detailed organic and chemical treatment options, including dosage and preventive care, for {disease_name} in crops."}]}
        ],
        "generationConfig": {"temperature": 0.7, "candidateCount": 1},
    }
    try:
        response = requests.post(url, headers=headers, json=data, timeout=30)
        if response.status_code == 200:
            result = response.json()
            candidates = result.get("candidates", [])
            if candidates:
                parts = candidates[0].get("content", {}).get("parts", [])
                if parts:
                    return parts[0].get("text", "No treatment suggestions found.")
            return "No treatment suggestions found."
        logging.error(f"API Error: {response.status_code} - {response.text}")
        return f"API Error: {response.status_code}"
    except Exception as e:
        logging.error(f"Error fetching treatment suggestions: {str(e)}")
        return "Error retrieving treatment details."
# Define Prediction Function
def predict(image):
    try:
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        inputs = processor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        predicted_label = model.config.id2label[predicted_class_idx]
        # Get AI-generated treatment suggestions
        treatment = get_treatment_suggestions(predicted_label)
        return f"Predicted Disease: {predicted_label}\nTreatment: {treatment}"
    except Exception as e:
        logging.error(f"Prediction failed: {str(e)}")
        return f"❌ Prediction failed: {str(e)}"
# Gradio Interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy", label="Upload or capture plant image"),
    outputs=gr.Textbox(label="Result"),
    title="AI-Powered Plant Disease Detector",
    description="Upload a plant leaf image to detect diseases and get AI-powered treatment suggestions.",
    allow_flagging="never",
)
# Launch Gradio App
iface.launch()
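# Note: the default launch() is sufficient on Hugging Face Spaces; for local runs
# a temporary public link can optionally be requested instead:
# iface.launch(share=True)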