Update app.py
app.py
CHANGED
@@ -1,5 +1,3 @@
-# app.py
-
 import uvicorn
 import base64
 import io
@@ -9,23 +7,18 @@ from pathlib import Path
 from fastai.vision.all import *

 # FastHTML imports
-from fasthtml.common import *
-from fasthtml.core import FastHTML
-from fasthtml.components import FileInput
-from fastcore.utils import *
+from fasthtml.common import *               # Imports common HTML tags, App, etc.
+from fasthtml.core import FastHTML          # Base class if needed, but App is usually sufficient
+from fasthtml.components import FileInput   # Specific component for file input
+from fastcore.utils import *                # For Upload class

 # --- Configuration ---
-# Ensure the path to your exported model is correct
-# When deploying to HF Spaces, this relative path works if model.pkl is in the same directory
 MODEL_PATH = Path(__file__).parent / 'model.pkl'
-# Set device (CPU is often the default/safest for HF free tier)
-# Use 'cuda' if you have a GPU and want to use it: defaults_device(use_cuda=True)
 defaults.device = torch.device('cpu')

 # --- Load Fastai Learner ---
-# Load the model once when the application starts
 try:
-    print(f"Attempting to load model from: {MODEL_PATH.resolve()}")
+    print(f"Attempting to load model from: {MODEL_PATH.resolve()}")
     if not MODEL_PATH.is_file():
         raise FileNotFoundError(f"Model file not found at calculated path: {MODEL_PATH.resolve()}")
     learn = load_learner(MODEL_PATH)
@@ -36,17 +29,14 @@ try:
 except FileNotFoundError as e:
     print(f"Error: {e}")
     print("Please make sure 'model.pkl' is in the same directory as app.py.")
-    # Exit if model loading fails, as the app cannot function
     raise SystemExit(f"CRITICAL ERROR: Model file not found at {MODEL_PATH}. Application cannot start.")
 except Exception as e:
     print(f"CRITICAL ERROR: An unexpected error occurred loading the model: {e}")
-    # Exit for any other critical model loading error
     raise SystemExit(f"CRITICAL ERROR: Failed to load model. Application cannot start. Error: {e}")

 # --- FastHTML App Setup ---
-# FastHTML/Uvicorn will automatically find this 'app' object when run via 'uvicorn app:app'
 app = FastHTML()
-rt = app.route
+rt = app.route  # Shortcut for the route decorator

 # --- Helper Function for Prediction ---
 def predict_image(img_bytes: bytes):
@@ -63,129 +53,121 @@ def predict_image(img_bytes: bytes):
         return pred_class, confidence
     except Exception as e:
         print(f"Error during prediction: {e}")
-        # Return a user-friendly error message and neutral confidence
         return f"Prediction Error: Could not process image ({e})", 0.0

 # --- Define Routes ---
-
 @rt("/")
 async def get(request):
     """Serves the main page with the upload form."""
-    # Using Bootstrap classes for basic styling and layout
     return Titled("Fastai Image Classifier",
-        Main(
+        Main(
            H1("Upload an Image for Classification"),
            # --- Form for uploading the image ---
-           # Arguments MUST be ordered: Positional arguments (content) first, then Keyword arguments (attributes)
            Form(
-               #
-               Div(
-
-
+               # Positional argument: Div that contains the file input
+               Div(
+                   FileInput(name="file", id="fileInput", cls="form-control", required=True, accept="image/*"),
+                   cls="mb-3"
               ),
-
-
-               #
-
-
-
-
-
-
-
-           ), # End of Form component arguments
+               # Positional argument: Submit button
+               Button("Classify Image", type="submit", cls="btn btn-primary"),
+               # Keyword arguments: Form attributes
+               hx_post="/predict",
+               hx_target="#results",
+               hx_swap="innerHTML",
+               hx_encoding="multipart/form-data",
+               hx_indicator="#loading-spinner",
+               id="upload-form"
+           ),
            # --- Loading Indicator ---
-
-
-
+           Div(
+               Span("Loading...", cls="visually-hidden"),
+               id="loading-spinner", cls="htmx-indicator spinner-border mt-3",
+               role="status", style="display: none;"
           ),
           # --- Results Area ---
-
-
-
-
+           Div(
+               id="results", cls="mt-4"
+           ),
+           cls="container mt-4"
+        )
+    )

 @rt("/predict", methods=["POST"])
 async def post(request, file: Upload):
     """Handles image upload, performs prediction, and returns results as an HTML fragment."""
-    # --- Input Validation ---
     if not file or not file.filename:
-
-
+        return Div(
+            P("No file uploaded. Please select an image file.", cls="alert alert-warning mt-3"),
+            id="results"
+        )

-    # Basic check for allowed image file extensions
     allowed_extensions = {'.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp'}
     file_ext = Path(file.filename).suffix.lower()
     if file_ext not in allowed_extensions:
-
-
+        return Div(
+            P(f"Invalid file type: '{file_ext}'. Please upload an image ({', '.join(allowed_extensions)}).", cls="alert alert-danger mt-3"),
+            id="results"
+        )

     print(f"Received file: {file.filename}, Content-Type: {file.content_type}, Size: {file.size}")

-    # --- Read Image Data ---
     try:
-        img_bytes = await file.read()
+        img_bytes = await file.read()  # Read the file content asynchronously
         if not img_bytes:
-
-
+            return Div(
+                P("Uploaded file appears to be empty.", cls="alert alert-warning mt-3"),
+                id="results"
+            )
     except Exception as e:
         print(f"Error reading uploaded file: {e}")
-
-
+        return Div(
+            P(f"Error reading uploaded file: {e}", cls="alert alert-danger mt-3"),
+            id="results"
+        )

     # --- Perform Prediction ---
     prediction, confidence = predict_image(img_bytes)

-    #
-    # Encode image to base64 to display a preview, only if prediction was okay
+    # Encode image to base64 for preview (only if prediction was successful)
     img_src = None
-    if "Error" not in str(prediction):
+    if "Error" not in str(prediction):
         try:
             img_base64 = base64.b64encode(img_bytes).decode('utf-8')
-
-            content_type = file.content_type if file.content_type and file.content_type.startswith('image/') else 'image/jpeg'
+            content_type = file.content_type if (file.content_type and file.content_type.startswith('image/')) else 'image/jpeg'
             img_src = f"data:{content_type};base64,{img_base64}"
         except Exception as e:
             print(f"Error encoding image to base64: {e}")
-            # Log error, but proceed without image preview

-    # Determine result styling based on success or failure
     result_cls = "alert alert-danger" if "Error" in str(prediction) else "alert alert-success"

-    # --- Return HTML Fragment ---
-    # This HTML will replace the content of the #results div
     return Div(
         # Display image preview if available
-        (Img(src=img_src, alt="Uploaded Image Preview",
+        (Img(src=img_src, alt="Uploaded Image Preview",
+             style="max-width: 300px; max-height: 300px; margin-top: 15px; margin-bottom: 10px; display: block; border: 1px solid #ddd;")
+         if img_src else P("Preview not available.")),
         # Display prediction results or error message
-        Div(
+        Div(
             P(Strong("Prediction: "), f"{prediction}"),
-
-
+            (P(Strong("Confidence: "), f"{confidence:.4f}") if "Error" not in str(prediction) else None),
+            cls=f"{result_cls} mt-3",
+            role="alert"
         ),
-
-
-        id="results", # Adding id here ensures the target div itself is replaced if needed, though innerHTML swap is default
-        hx_swap_oob="true" # Example if you wanted to update multiple targets, not needed here for innerHTML swap.
+        id="results",
+        hx_swap_oob="true"
     )

-
 # --- Add CSS/JS Headers ---
-# Include Bootstrap CSS for styling and JS for potential component interactions (like dropdowns, modals, etc., though not used here)
-# FastHTML automatically includes HTMX
 app.sheets.append(
-    Link(href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css", rel="stylesheet",
+    Link(href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css", rel="stylesheet",
+         integrity="sha384-T3c6CoIi6uLrA9TneNEoa7RxnatzjcDSCmG1MXxSR1GAsXEV/Dwwykc2MPK8M2HN", crossorigin="anonymous")
 )
 app.hdrs.append(
-    Script(src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js",
+    Script(src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js",
+           integrity="sha384-C6RzsynM9kWDrMNeT87bh95OGNyZPhcTNXj1NW7RuBCsyN/o0jlpcV8Qyq46cDfL", crossorigin="anonymous")
 )

 # --- Run the App (for local development) ---
-# This block is executed when you run `python app.py` directly.
-# Hugging Face Spaces will use its own mechanism to run the 'app' object via an ASGI server like Uvicorn.
 if __name__ == "__main__":
     print("Starting Uvicorn server for local development...")
-
-    # Port 8000 is a common choice for web development
-    # reload=True automatically restarts the server when code changes (useful for development)
-    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
+    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
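For a quick local check of this commit, you can exercise the /predict route with a small client script. The sketch below is illustrative and not part of the Space: it assumes the app is running locally (for example via `python app.py`, so at http://localhost:8000), that a valid model.pkl sits next to app.py, that the `requests` package is installed, and that test.jpg is any local image. The multipart field name "file" matches the form's FileInput(name="file") above.

# local_predict_check.py -- illustrative sketch, not part of this commit
# Assumes app.py from this commit is running at http://localhost:8000
# and that 'test.jpg' is any local image you want to classify.
import requests

URL = "http://localhost:8000/predict"  # route registered with @rt("/predict") above

with open("test.jpg", "rb") as f:
    # Field name must be "file" to match FileInput(name="file") in the upload form.
    response = requests.post(URL, files={"file": ("test.jpg", f, "image/jpeg")})

print(response.status_code)
print(response.text)  # the endpoint returns an HTML fragment (the #results Div), not JSON

Because the route returns server-rendered HTML for HTMX to swap into #results, checking response.text for the expected "alert alert-success" or "alert alert-danger" classes is usually enough for a smoke test.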