cmckinle committed
Commit af90ec3 · verified · 1 Parent(s): 10b7edc

Update app.py

Files changed (1)
  1. app.py +30 -31
app.py CHANGED
@@ -178,47 +178,45 @@ def classify_images(image_dir, model_pipeline, model_idx):
     return labels, preds, images
 
 
-# Function to generate evaluation metrics
-def evaluate_model(labels, preds):
-    cm = confusion_matrix(labels, preds)
-    accuracy = accuracy_score(labels, preds)
-    roc_score = roc_auc_score(labels, preds)
-    report = classification_report(labels, preds)
-    fpr, tpr, _ = roc_curve(labels, preds)
-    roc_auc = auc(fpr, tpr)
-
-    fig, ax = plt.subplots()
-    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=["AI", "Real"])
-    disp.plot(cmap=plt.cm.Blues, ax=ax)
-    plt.close(fig)
-
-    fig_roc, ax_roc = plt.subplots()
-    ax_roc.plot(fpr, tpr, color='blue', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
-    ax_roc.plot([0, 1], [0, 1], color='gray', linestyle='--')
-    ax_roc.set_xlim([0.0, 1.0])
-    ax_roc.set_ylim([0.0, 1.05])
-    ax_roc.set_xlabel('False Positive Rate')
-    ax_roc.set_ylabel('True Positive Rate')
-    ax_roc.set_title('Receiver Operating Characteristic (ROC) Curve')
-    ax_roc.legend(loc="lower right")
-    plt.close(fig_roc)
-
-    return accuracy, roc_score, report, fig, fig_roc
+# Function to classify images in a folder
+def classify_images(image_dir, model_pipeline, model_idx):
+    images = []
+    labels = []
+    preds = []
+    for folder_name, ground_truth_label in [('real', 1), ('ai', 0)]:
+        folder_path = os.path.join(image_dir, folder_name)
+        if not os.path.exists(folder_path):
+            continue
+        for img_name in os.listdir(folder_path):
+            img_path = os.path.join(folder_path, img_name)
+            try:
+                img = Image.open(img_path).convert("RGB")
+
+                # Ensure that each image is being processed by the correct model pipeline
+                pred = model_pipeline(img)
+                pred_label = np.argmax([x['score'] for x in pred])
+
+                preds.append(pred_label)
+                labels.append(ground_truth_label)
+                images.append(img_name)
+            except Exception as e:
+                print(f"Error processing image {img_name} in model {model_idx}: {e}")
+    return labels, preds, images
 
-# Batch processing for all models
-# Batch processing for all models
 # Batch processing for all models
 def process_zip(zip_file):
     extracted_dir = extract_zip(zip_file.name)
 
-    # Initialize model pipelines (already initialized outside)
+    # Initialize model pipelines separately to avoid reuse issues
     model_pipelines = [pipe0, pipe1, pipe2]
 
     # Run classification for each model
     results = {}
-    for idx, pipe in enumerate(model_pipelines):  # Ensure each model pipeline is used separately
+    for idx, pipe in enumerate(model_pipelines):
         print(f"Processing with model {idx}")
-        labels, preds, images = classify_images(extracted_dir, pipe, idx)  # Pass in model index for debugging
+
+        # Classify images with the correct pipeline per model
+        labels, preds, images = classify_images(extracted_dir, pipe, idx)
         accuracy, roc_score, report, cm_fig, roc_fig = evaluate_model(labels, preds)
 
         # Store results for each model
@@ -240,6 +238,7 @@ def process_zip(zip_file):
 
 
 
+
 # Single image section
 def load_url(url):
     try:
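
For context, the flow this diff lands is: process_zip() extracts the uploaded archive once, then runs classify_images() once per model pipeline, taking ground truth from the real/ and ai/ folder names. Below is a minimal standalone sketch of that flow, assuming three Hugging Face image-classification pipelines. The checkpoint names, the "extracted" path, and the accuracy printout are placeholders for illustration, not what app.py actually uses; the classify_images body mirrors the committed function.

import os
import numpy as np
from PIL import Image
from sklearn.metrics import accuracy_score
from transformers import pipeline

# Placeholder checkpoints -- app.py builds its own pipe0/pipe1/pipe2 elsewhere.
MODEL_NAMES = ["org/detector-a", "org/detector-b", "org/detector-c"]
pipes = [pipeline("image-classification", model=name) for name in MODEL_NAMES]

def classify_images(image_dir, model_pipeline, model_idx):
    # Mirrors the committed function: images under real/ get label 1, ai/ get 0.
    images, labels, preds = [], [], []
    for folder_name, ground_truth_label in [('real', 1), ('ai', 0)]:
        folder_path = os.path.join(image_dir, folder_name)
        if not os.path.exists(folder_path):
            continue
        for img_name in os.listdir(folder_path):
            try:
                img = Image.open(os.path.join(folder_path, img_name)).convert("RGB")
                pred = model_pipeline(img)  # list of {'label': ..., 'score': ...} dicts
                # Index of the highest-scoring entry, exactly as in the committed code.
                preds.append(np.argmax([x['score'] for x in pred]))
                labels.append(ground_truth_label)
                images.append(img_name)
            except Exception as e:
                print(f"Error processing image {img_name} in model {model_idx}: {e}")
    return labels, preds, images

for idx, pipe in enumerate(pipes):
    labels, preds, images = classify_images("extracted", pipe, idx)
    print(f"Model {idx}: {accuracy_score(labels, preds):.3f} accuracy on {len(images)} images")

The sketch swaps evaluate_model() for a bare accuracy_score() call so it runs without the plotting code; app.py itself passes the per-model labels and preds on to evaluate_model for the confusion matrix and ROC figures.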