monster07 committed
Commit 06fdfc7 · verified
1 Parent(s): 036f96b

Update app.py

Files changed (1)
  1. app.py +41 -40
app.py CHANGED
@@ -1,89 +1,90 @@
-
  import gradio as gr
+ import cv2
  import torch
  import numpy as np
- import cv2
  import matplotlib.pyplot as plt
  from PIL import Image
  from transformers import AutoImageProcessor, SiglipForImageClassification

- # ✅ Load model from Hugging Face (no manual files)
+ # ✅ Load model and processor (no manual files)
  model_name = "prithivMLmods/deepfake-detector-model-v1"
  processor = AutoImageProcessor.from_pretrained(model_name)
  model = SiglipForImageClassification.from_pretrained(model_name)
  model.eval()

- # ✅ Haar cascade for face detection
+ # ✅ Face detector
  face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

- # ✅ Deepfake analysis logic
- def detect_deepfake(video_path):
+ # ✅ Deepfake detection function
+ def analyze(video_path):
      if video_path is None:
-         return "❌ Please upload a valid .mp4 video", None
+         return "❌ Please upload a video", None

      cap = cv2.VideoCapture(video_path)
-     preds = []
-     count = 0
-     max_frames = 20
+     frame_preds = []
+     frame_count = 0
+     max_frames = 60

      while True:
          ret, frame = cap.read()
-         if not ret or count >= max_frames:
+         if not ret or frame_count >= max_frames:
              break

          gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-         faces = face_detector.detectMultiScale(gray, 1.1, 4)
+         faces = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

-         if len(faces) > 0:
-             x, y, w, h = faces[0]  # Take first detected face
+         found = False
+         for (x, y, w, h) in faces:
              face = frame[y:y+h, x:x+w]
              if face.size == 0:
                  continue

              face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-             pil_img = Image.fromarray(face_rgb)
-             inputs = processor(images=pil_img, return_tensors="pt")
+             pil_image = Image.fromarray(face_rgb)
+             inputs = processor(images=pil_image, return_tensors="pt")

              with torch.no_grad():
                  logits = model(**inputs).logits
-                 prob = torch.softmax(logits, dim=-1)[0][1].item()
-             preds.append(prob)
+                 fake_prob = torch.softmax(logits, dim=-1)[0][1].item()
+
+             frame_preds.append(fake_prob)
+             found = True
+             break
+
+         if not found:
+             frame_preds.append(0.5)  # neutral prediction
+
+         frame_count += 1
-         count += 1

      cap.release()

-     if not preds:
-         return "❌ No faces detected. Try a clearer video.", None
+     if not frame_preds:
+         return "❌ No faces found. Try a better-quality video.", None

-     avg_conf = np.mean(preds)
-     label = "**FAKE**" if avg_conf > 0.5 else "**REAL**"
-     result = f"""
- 🎯 **Result:** {label}
- 🔒 Avg Confidence: {avg_conf:.2f}
- 📊 Frames Analyzed: {len(preds)}
- """
+     avg = np.mean(frame_preds)
+     verdict = "FAKE" if avg > 0.5 else "REAL"
+     result = f"✅ FINAL RESULT: **{verdict}**\n🔒 Confidence: {avg:.2f}"

-     # ✅ Create histogram
-     fig, ax = plt.subplots()
-     ax.hist(preds, bins=10, color="red" if avg_conf > 0.5 else "green", edgecolor="black")
+     # ✅ Plot
+     fig, ax = plt.subplots(figsize=(6, 4))
+     ax.hist(frame_preds, bins=10, color="red" if avg > 0.5 else "green", edgecolor="black")
      ax.set_title("Fake Confidence per Frame")
-     ax.set_xlabel("Fake Probability (0 = Real, 1 = Fake)")
-     ax.set_ylabel("Frames")
+     ax.set_xlabel("Confidence (0=Real, 1=Fake)")
+     ax.set_ylabel("Frame Count")
      ax.grid(True)

      return result, fig

  # ✅ Gradio interface
  with gr.Blocks() as demo:
-     gr.Markdown("## 🎭 Deepfake Detector (Transformer-based)")
-     gr.Markdown("Upload a short `.mp4` video. The app will detect faces and classify the video as **REAL** or **FAKE**.")
+     gr.Markdown("## 🎭 Deepfake Detector (Colab Version Converted to Gradio)")
+     gr.Markdown("Upload a short `.mp4` video and get a REAL or FAKE decision with confidence histogram.")

-     video_input = gr.Video(label="📀 Upload your video")
-     result_output = gr.Markdown()
-     graph_output = gr.Plot()
-     analyze_button = gr.Button("🔍 Analyze")
+     video = gr.Video(label="Upload your video")
+     result = gr.Markdown()
+     plot = gr.Plot()
+     button = gr.Button("🔍 Analyze")

-     analyze_button.click(fn=detect_deepfake, inputs=video_input, outputs=[result_output, graph_output])
+     button.click(fn=analyze, inputs=video, outputs=[result, plot])

  demo.queue().launch()
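Both versions share the same per-face scoring step: crop the detected face, run it through the SigLIP classifier, and take the softmax probability at index 1 as the "fake" score. Below is a minimal sketch of that step in isolation, assuming the same checkpoint as the commit; the helper name `fake_probability` is illustrative, and the index-1-means-fake mapping is the app's assumption, which can be checked against `model.config.id2label`:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, SiglipForImageClassification

model_name = "prithivMLmods/deepfake-detector-model-v1"
processor = AutoImageProcessor.from_pretrained(model_name)
model = SiglipForImageClassification.from_pretrained(model_name)
model.eval()

def fake_probability(face_img: Image.Image) -> float:
    """Softmax probability of the class at index 1 ('fake' per the app's assumption)."""
    inputs = processor(images=face_img, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return torch.softmax(logits, dim=-1)[0][1].item()

# Confirm which index is which before trusting the 0.5 threshold:
print(model.config.id2label)
```

Note the design choice in the new `analyze`: frames with no detected face are padded with a 0.5 "neutral" score, which sits exactly on the decision boundary, so such frames pull the mean toward the threshold without flipping a clear verdict on their own.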