import gradio as gr
import torch
from torch import nn
from torchvision import models, transforms
from PIL import Image
import requests
from io import BytesIO
import os

# Use the GPU when available so the model and the inputs live on the same device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Define the number of classes
num_classes = 2  # Update with the actual number of classes in your dataset
# Load the model (assuming the fine-tuned weights have already been downloaded)
def load_model():
    try:
        # Build a ResNet-50 backbone and replace the final layer for our classes
        model = models.resnet50(weights=None)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        model.load_state_dict(torch.load("path_to_your_model.pth", map_location=device))
        model.to(device)
        model.eval()
        return model
    except Exception as e:
        print(f"Error loading model: {e}")
        return None

model = load_model()
# Define the transformation for the input image
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
# Prediction function
def process_image(image, image_url=None):
    try:
        # Ensure the model loaded and at least one input was provided
        if model is None:
            return "Model is not available. Check the server logs for loading errors."
        if image is None and not image_url:
            return "No image or URL provided."

        # Handle URL-based image loading
        if image_url:
            try:
                response = requests.get(image_url, timeout=10)
                response.raise_for_status()  # Raise an error if the request fails
                image = Image.open(BytesIO(response.content))
            except Exception as e:
                return f"Error fetching image from URL: {e}"

        # Handle local file input from Gradio (a path string, or a tempfile-like object with .name)
        elif isinstance(image, str) or hasattr(image, "name"):
            try:
                path = image if isinstance(image, str) else image.name
                image = Image.open(path)
            except Exception as e:
                return f"Error loading image from local path: {e}"

        # Validate that the image is loaded correctly
        if not isinstance(image, Image.Image):
            return "Invalid image format received."

        # Apply transformations (convert to RGB so the 3-channel normalization applies)
        image = transform(image.convert("RGB")).unsqueeze(0)

        # Prediction on the same device as the model
        image = image.to(device)
        with torch.no_grad():
            outputs = model(image)
            predicted_class = torch.argmax(outputs, dim=1).item()

        if predicted_class == 0:
            return "The photo you've sent is of fall armyworm with problem ID 126."
        elif predicted_class == 1:
            return "The photo you've sent is of a healthy maize plant."
        else:
            return "Unexpected class prediction."
    except Exception as e:
        return f"Error processing image: {e}"
# Create the Gradio interface
iface = gr.Interface(
    fn=process_image,
    inputs=[
        gr.File(label="Upload an image (Local File Path)"),  # Input: local file
        gr.Textbox(label="Enter Image URL", placeholder="Enter image URL here", lines=1),  # Input: image URL
    ],
    outputs=gr.Textbox(label="Prediction Result"),  # Output: prediction result
    live=True,
    title="Maize Anomaly Detection",
    description="Upload an image of maize to detect anomalies like disease or pest infestation. You can upload local images or provide an image URL."
)

# Launch the Gradio interface
iface.launch(share=True, show_error=True)
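
# Optional local sanity check (a minimal sketch, not part of the Space itself):
# the sample path and URL below are hypothetical placeholders. Comment out the
# iface.launch(...) call above and uncomment these lines to exercise the
# prediction path without the Gradio UI.
#
# if model is not None:
#     print(process_image("sample_maize.jpg"))
#     print(process_image(None, image_url="https://example.com/maize.jpg"))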