Update app.py
app.py CHANGED
@@ -178,47 +178,45 @@ def classify_images(image_dir, model_pipeline, model_idx):
     return labels, preds, images
 
 
-# Function to
-def
-    ...
-    return accuracy, roc_score, report, fig, fig_roc
+# Function to classify images in a folder
+def classify_images(image_dir, model_pipeline, model_idx):
+    images = []
+    labels = []
+    preds = []
+    for folder_name, ground_truth_label in [('real', 1), ('ai', 0)]:
+        folder_path = os.path.join(image_dir, folder_name)
+        if not os.path.exists(folder_path):
+            continue
+        for img_name in os.listdir(folder_path):
+            img_path = os.path.join(folder_path, img_name)
+            try:
+                img = Image.open(img_path).convert("RGB")
+
+                # Ensure that each image is being processed by the correct model pipeline
+                pred = model_pipeline(img)
+                pred_label = np.argmax([x['score'] for x in pred])
+
+                preds.append(pred_label)
+                labels.append(ground_truth_label)
+                images.append(img_name)
+            except Exception as e:
+                print(f"Error processing image {img_name} in model {model_idx}: {e}")
+    return labels, preds, images
 
-# Batch processing for all models
-# Batch processing for all models
 # Batch processing for all models
 def process_zip(zip_file):
     extracted_dir = extract_zip(zip_file.name)
 
-    # Initialize model pipelines
+    # Initialize model pipelines separately to avoid reuse issues
     model_pipelines = [pipe0, pipe1, pipe2]
 
     # Run classification for each model
     results = {}
     for idx, pipe in enumerate(model_pipelines):
         print(f"Processing with model {idx}")
+
+        # Classify images with the correct pipeline per model
+        labels, preds, images = classify_images(extracted_dir, pipe, idx)
         accuracy, roc_score, report, cm_fig, roc_fig = evaluate_model(labels, preds)
 
         # Store results for each model
@@ -240,6 +238,7 @@ def process_zip(zip_file):
 
 
 
+
 # Single image section
 def load_url(url):
     try:
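For readers following the change, below is a minimal, self-contained sketch of the per-image step the new classify_images performs. A transformers image-classification pipeline returns a list of label/score dicts per image; the label names and scores here are invented example values, not output from this Space's models.

import numpy as np

# Example of the output shape classify_images assumes from model_pipeline(img);
# the labels and scores are made up for illustration.
pred = [
    {"label": "artificial", "score": 0.91},
    {"label": "real", "score": 0.09},
]

# classify_images keeps the index of the highest-scoring entry as the predicted class
# and pairs it with the folder-derived ground truth (real=1, ai=0) for evaluate_model.
pred_label = np.argmax([x["score"] for x in pred])
print(pred_label)  # 0 for these example scores

process_zip then repeats this per model: the added loop body calls classify_images once per pipeline (pipe0, pipe1, pipe2) so that each model's predictions are evaluated separately.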