Update app.py
app.py CHANGED
@@ -1,11 +1,16 @@
+import os
 import gradio as gr
 from transformers import AutoImageProcessor, AutoModelForImageClassification
 from PIL import Image
 import torch
 import numpy as np
-import logging
 import requests
-import
+import logging
+from dotenv import load_dotenv  # Load .env file
+
+# Load environment variables
+load_dotenv()
+HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

 # Configure Logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -20,30 +25,19 @@ except Exception as e:
     logging.error(f"❌ Failed to load model: {str(e)}")
     raise RuntimeError("Failed to load the model. Please check the logs for details.")

-# Gemini API Key (Replace with your actual key)
-GEMINI_API_KEY = os.getenv("AIzaSyCiRL0ES-zsJGJYsY03xmpwqcggDGcL2Fk", "AIzaSyCiRL0ES-zsJGJYsY03xmpwqcggDGcL2Fk")
-
 # Function to Get AI-Powered Treatment Suggestions
 def get_treatment_suggestions(disease_name):
-    url =
-    headers = {"
-    data = {
-        "prompt": { "text": f"Provide detailed organic and chemical treatment options, including dosage and preventive care, for {disease_name} in crops." },
-        "temperature": 0.7,
-        "candidate_count": 1
-    }
+    url = "https://api-inference.huggingface.co/models/OpenAGI/agriculture-gpt"
+    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
+    data = {"inputs": f"What are the treatment options for {disease_name} in plants?"}

     try:
         response = requests.post(url, headers=headers, json=data)
         if response.status_code == 200:
-            result = response.json()
-            treatment = result.get("candidates", [{}])[0].get("content", "No treatment suggestions found.")
-            return treatment
+            return response.json()[0]["generated_text"]
         else:
-            logging.error(f"API Error: {response.status_code} - {response.text}")
             return f"API Error: {response.status_code}"
     except Exception as e:
-        logging.error(f"Error fetching treatment suggestions: {str(e)}")
         return "Error retrieving treatment details."

 # Define Prediction Function
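The rewritten get_treatment_suggestions reads its token from the environment instead of shipping a hardcoded key in the source. A minimal local smoke test of that flow is sketched below; the .env contents, the placeholder token value, the example disease name, and the timeout are assumptions added for illustration, while the endpoint URL, Authorization header, and payload shape come from the diff above.

# smoke_test.py: a sketch for local verification, not part of app.py.
# Assumes a .env file in the same directory containing a line such as:
#   HUGGINGFACE_API_KEY=hf_xxxxxxxxxxxxxxxx   (placeholder, not a real token)
import os

import requests
from dotenv import load_dotenv

load_dotenv()  # read .env into the process environment, as the updated app.py does
api_key = os.getenv("HUGGINGFACE_API_KEY")
if not api_key:
    raise SystemExit("HUGGINGFACE_API_KEY is not set; add it to .env or the Space secrets")

# Same endpoint and request shape as get_treatment_suggestions in the commit.
url = "https://api-inference.huggingface.co/models/OpenAGI/agriculture-gpt"
headers = {"Authorization": f"Bearer {api_key}"}
payload = {"inputs": "What are the treatment options for Tomato Late Blight in plants?"}

response = requests.post(url, headers=headers, json=payload, timeout=30)
if response.status_code == 200:
    print(response.json()[0]["generated_text"])
else:
    print(f"API Error: {response.status_code} - {response.text}")

On a Hugging Face Space the same variable is normally supplied as a repository secret, so it reaches os.getenv without a committed .env file; either way the token stays out of version control, unlike the removed hardcoded GEMINI_API_KEY line.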