Update app.py
app.py
CHANGED
@@ -5,6 +5,7 @@ import torch
 import numpy as np
 import json
 import logging
+import requests
 import os
 
 # Configure Logging
@@ -20,38 +21,30 @@ except Exception as e:
     logging.error(f"❌ Failed to load model: {str(e)}")
     raise RuntimeError("Failed to load the model. Please check the logs for details.")
 
-#
-
+# Gemini API Key
+GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "AIzaSyDaWXCixp-KcO63-lCtCsFbjyXEIFsNV6k")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Image Validation
-def validate_image(image):
-    if image.size[0] < 64 or image.size[1] < 64:
-        raise ValueError("⚠️ Image is too small. Please upload an image of at least 64x64 pixels.")
-    return image
+# Function to Get AI-Powered Treatment Suggestions
+def get_treatment_suggestions(disease_name):
+    prompt = f"Provide detailed organic and chemical treatment options, including dosage and preventive care, for {disease_name} in crops."
+    url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateText"
+    headers = {"Content-Type": "application/json"}
+    data = {"prompt": prompt, "temperature": 0.7, "max_tokens": 250}
+    params = {"key": GEMINI_API_KEY}
+    try:
+        response = requests.post(url, headers=headers, json=data, params=params)
+        if response.status_code == 200:
+            return response.json().get("candidates", [{}])[0].get("output", "No treatment suggestions found.")
+        else:
+            return "Failed to fetch treatment suggestions."
+    except Exception as e:
+        logging.error(f"Error fetching treatment suggestions: {str(e)}")
+        return "Error retrieving treatment details."
 
-# Prediction Function
+# Define Prediction Function
 def predict(image):
     try:
         image = Image.fromarray(np.uint8(image)).convert("RGB")
-        validate_image(image)
-
         inputs = processor(images=image, return_tensors="pt")
         with torch.no_grad():
             outputs = model(**inputs)
@@ -59,9 +52,10 @@ def predict(image):
         predicted_class_idx = logits.argmax(-1).item()
         predicted_label = model.config.id2label[predicted_class_idx]
 
-
+        # Get AI-generated treatment suggestions
+        treatment = get_treatment_suggestions(predicted_label)
+
         return f"Predicted Disease: {predicted_label}\nTreatment: {treatment}"
-
     except Exception as e:
         logging.error(f"Prediction failed: {str(e)}")
         return f"❌ Prediction failed: {str(e)}"
@@ -71,10 +65,10 @@ iface = gr.Interface(
     fn=predict,
     inputs=gr.Image(type="numpy", label="Upload or capture plant image"),
     outputs=gr.Textbox(label="Result"),
-    title="Plant Disease Detector",
-    description="Upload a plant leaf image to detect diseases and get treatment suggestions.",
+    title="AI-Powered Plant Disease Detector",
+    description="Upload a plant leaf image to detect diseases and get AI-powered treatment suggestions.",
     allow_flagging="never",
 )
 
 # Launch Gradio App
-iface.launch(
+iface.launch()
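Note on the new Gemini call: get_treatment_suggestions posts a flat prompt/temperature/max_tokens body to the gemini-pro:generateText endpoint and reads candidates[0].output from the response. The sketch below shows the same lookup written against the generateContent method of the Generative Language REST API; the endpoint, request body, and response path are assumptions about Google's current API surface rather than part of this commit, and the key is taken strictly from the GEMINI_API_KEY environment variable instead of a hardcoded fallback.

# Hedged sketch only: assumes the generateContent method of the
# Generative Language REST API; this is not the endpoint used in the commit above.
import os
import logging
import requests

# Assumption: the key is provided as a Space secret; fail fast if it is missing.
GEMINI_API_KEY = os.environ["GEMINI_API_KEY"]

def get_treatment_suggestions(disease_name):
    prompt = (
        "Provide detailed organic and chemical treatment options, "
        f"including dosage and preventive care, for {disease_name} in crops."
    )
    url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent"
    payload = {"contents": [{"parts": [{"text": prompt}]}]}
    try:
        response = requests.post(url, params={"key": GEMINI_API_KEY}, json=payload, timeout=30)
        response.raise_for_status()
        candidates = response.json().get("candidates", [])
        if not candidates:
            return "No treatment suggestions found."
        # Text generations are returned under content.parts in each candidate.
        return candidates[0]["content"]["parts"][0]["text"]
    except Exception as e:
        logging.error(f"Error fetching treatment suggestions: {str(e)}")
        return "Error retrieving treatment details."

Reading the key from the environment keeps the credential out of the repository and lets the Space fail fast when it is not configured.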