Update app.py
app.py CHANGED
@@ -46,22 +46,35 @@ transform = transforms.Compose([
 
 def predict(input_data):
     try:
-
+        print(f"Input data received: {input_data}, Type: {type(input_data)}")
+
+        # Check if the input is a URL or image
         if isinstance(input_data, str): # If it's a string, assume it's a URL
-
-
+            try:
+                response = requests.get(input_data)
+                response.raise_for_status() # Raise error for HTTP issues
                 img = Image.open(BytesIO(response.content))
-
-
+                print("Image fetched successfully from URL.")
+            except Exception as e:
+                print(f"Error fetching image from URL: {e}")
+                return json.dumps({"error": f"Failed to fetch image from URL: {e}"})
         else: # If it's not a string, assume it's an image file
             img = input_data
 
-        #
+        # Validate the image
         if not isinstance(img, Image.Image):
+            print("Invalid image format received.")
             return json.dumps({"error": "Invalid image format received. Please provide a valid image."})
+        else:
+            print(f"Image successfully loaded: {img}")
 
         # Apply transformations to the image
         img = transform(img).unsqueeze(0)
+        print(f"Transformed image tensor shape: {img.shape}")
+
+        # Ensure model is loaded
+        if model is None:
+            return json.dumps({"error": "Model not loaded. Ensure the model file is available and correctly loaded."})
 
         # Move the image to the correct device
         img = img.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@@ -70,6 +83,7 @@ def predict(input_data):
         with torch.no_grad():
             outputs = model(img)
             predicted_class = torch.argmax(outputs, dim=1).item()
+        print(f"Model prediction outputs: {outputs}, Predicted class: {predicted_class}")
 
         # Return the result based on the predicted class
         if predicted_class == 0:
@@ -80,9 +94,11 @@ def predict(input_data):
             return json.dumps({"error": "Unexpected class prediction."})
 
     except Exception as e:
+        print(f"Error processing image: {e}")
         return json.dumps({"error": f"Error processing image: {e}"})
 
 
+
 # Create the Gradio interface with both local file upload and URL input
 iface = gr.Interface(
     fn=predict,
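
For reference, the updated predict() can be exercised outside the Gradio UI with a small smoke test along the lines of the sketch below. This is only an illustration, not part of the Space: it assumes the script above is saved as app.py alongside it, that importing app does not immediately launch the interface (if app.py ends with an unguarded iface.launch(), drop the two calls into app.py temporarily instead), and that the placeholder URL is replaced with a reachable image URL.

# smoke_test.py -- hypothetical local check for the updated predict(); not part of the Space itself
from PIL import Image

from app import predict  # assumes the script above is importable as app.py

# 1) URL input: exercises the new requests.get() / raise_for_status() branch.
#    Replace the placeholder with a reachable image URL before running.
print("URL input ->", predict("https://example.com/sample.jpg"))

# 2) PIL image input: exercises the isinstance(img, Image.Image) validation branch
#    using a synthetic grey image, so no local file is needed.
print("Image input ->", predict(Image.new("RGB", (224, 224), color="gray")))

Both calls return either a class result or a json.dumps error payload, and the new print() statements in predict() show each stage (input type, fetched image, tensor shape, model outputs) in the console, which is the debugging behaviour this commit adds.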