FSFM-3C committed on
Commit 75db2c5 · 1 Parent(s): ae2e02c
Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -273,7 +273,7 @@ def extract_face_from_fixed_num_frames(src_video, dst_path, num_frames=None, dev
 
     video_capture.release()
     # cv2.destroyAllWindows()
-    return frame_index
+    return frame_indices
 
 
 def FSFM3C_video_detection(video):
@@ -287,7 +287,7 @@ def FSFM3C_video_detection(video):
     frame_path = os.path.join(FRAME_SAVE_PATH, str(num_files))
     os.makedirs(frame_path, exist_ok=True)
     os.makedirs(os.path.join(frame_path, '0'), exist_ok=True)
-    frame_index = extract_face_from_fixed_num_frames(video, frame_path, num_frames=num_frames, device=device)
+    frame_indices = extract_face_from_fixed_num_frames(video, frame_path, num_frames=num_frames, device=device)
 
     args.data_path = frame_path
     args.batch_size = 32
@@ -304,7 +304,7 @@ def FSFM3C_video_detection(video):
     frame_preds_list, video_pred_list = test_all(data_loader_val, model, device)
 
     real_prob_frames = [round(1. - fake_score, 2) for fake_score in video_pred_list]
-    frame_results = {f"frame_{frame}": f"{int(real_prob_frames[i] * 100)}%" for i, frame in enumerate(frame_index)}
+    frame_results = {f"frame_{frame}": f"{int(real_prob_frames[i] * 100)}%" for i, frame in enumerate(frame_indices)}
 
     real_prob_video = int(round(1. - (sum(video_pred_list) / len(video_pred_list)), 2) * 100)
     if real_prob_video > 50:
@@ -372,7 +372,7 @@ with gr.Blocks() as demo:
 
     gr.Markdown("- <b>V1.0 [2024-12] (Current):</b> "
                 "Create this page with basic detectors (simply fine-tuned models) that follow the paper implementation. "
-                "<b>Notes:</b> Performance is limited because no any optimization of data, models, hyperparameters, etc. is done for downstream tasks. \ "
+                "<b>Notes:</b> Performance is limited because no any optimization of data, models, hyperparameters, etc. is done for downstream tasks. <br> "
                 "<b>[TODO]: </b> Update practical models, and optimized interfaces, and provide more functions such as visualizations, a unified detector, and multi-modal diagnosis.")
 
     gr.Markdown(
 
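For context on why the rename matters: the indices returned by extract_face_from_fixed_num_frames are what key the per-frame results downstream, so the old frame_index name had to change everywhere it was used. Below is a minimal sketch of that pairing; only the names (frame_indices, video_pred_list, real_prob_frames, frame_results, real_prob_video) come from app.py, while the values are toy stand-ins for the detector's real outputs.

```python
# Minimal sketch (not from the repo): toy values stand in for the detector's
# real outputs. In app.py, extract_face_from_fixed_num_frames() returns the
# sampled frame indices and test_all() returns one fake score per frame; the
# two lists are aligned by position, which is why the rename to frame_indices
# has to be applied consistently.

frame_indices = [0, 15, 30, 45]             # hypothetical sampled frame numbers
video_pred_list = [0.10, 0.20, 0.80, 0.50]  # hypothetical per-frame fake scores

# Per-frame "real" probability is the complement of the fake score (line 306).
real_prob_frames = [round(1. - fake_score, 2) for fake_score in video_pred_list]

# Each prediction is keyed by the frame index it came from (line 307).
frame_results = {f"frame_{frame}": f"{int(real_prob_frames[i] * 100)}%"
                 for i, frame in enumerate(frame_indices)}

# Video-level "real" probability is the complement of the mean fake score (line 309).
real_prob_video = int(round(1. - (sum(video_pred_list) / len(video_pred_list)), 2) * 100)

print(frame_results)    # {'frame_0': '90%', 'frame_15': '80%', 'frame_30': '20%', 'frame_45': '50%'}
print(real_prob_video)  # 60
```

With these toy scores the video-level real probability comes out to 60, which clears the real_prob_video > 50 check at line 310 and would be reported as real.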