DarkVision committed
Commit f5a2b99 · 1 Parent(s): b656ffa
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ fake-1.mp4 filter=lfs diff=lfs merge=lfs -text
+ p1/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+ real-1.mp4 filter=lfs diff=lfs merge=lfs -text
+ Video1-fake-1-ff.mp4 filter=lfs diff=lfs merge=lfs -text
+ Video6-real-1-ff.mp4 filter=lfs diff=lfs merge=lfs -text
+ Video8-real-3-ff.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,37 @@
+ ---
+ title: Deepfakes_Video_Detector
+ emoji: 🔥
+ colorFrom: blue
+ colorTo: gray
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ ---
+
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio`, `streamlit`, or `static`
+
+ `sdk_version`: _string_
+ Only applicable for the `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` HTML code).
+ Path is relative to the root of the repository.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
Video1-fake-1-ff.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58262ed5e804069587e393ed06b48e655ca35d7ad58b68c161f5356a14482c48
+ size 1746578
Video6-real-1-ff.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad4e54db5f1b0c2f556e039d61ec38e7195edbba6257e266244be64af0bda5e3
+ size 1771036
Video8-real-3-ff.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:719f49698458abfa2ff25eb617ff03c5e56ddea51d912d65fbfa44c3db94768a
+ size 8949516
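These ADDED video entries are Git LFS pointer files: the repository itself stores only the `version`, `oid` (SHA-256 of the content), and `size` fields, while the actual video bytes live in LFS storage. A minimal sketch of reading such a pointer in Python (the `read_lfs_pointer` helper is illustrative, not part of this repo):

```python
# Parse a Git LFS pointer file ("key value" per line, as shown above).
def read_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

ptr = read_lfs_pointer("Video1-fake-1-ff.mp4")  # illustrative path
print(ptr["oid"], ptr["size"])  # sha256:58262ed5... 1746578
```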
api.py ADDED
@@ -0,0 +1,149 @@
+ from flask import Flask, render_template, request, redirect, url_for
+ import gradio as gr
+ import cv2
+ import numpy as np
+ import tensorflow as tf
+ from facenet_pytorch import MTCNN
+ import moviepy.editor as mp
+ from PIL import Image
+ import os
+ import zipfile
+ import json
+ import base64
+ from tensorflow_addons.optimizers import RectifiedAdam
+ from tensorflow.keras.utils import get_custom_objects
+
+ # Register RectifiedAdam so tf.keras.models.load_model can deserialize a
+ # model that was compiled with this custom optimizer.
+ get_custom_objects().update({"RectifiedAdam": RectifiedAdam})
+
+ app = Flask(__name__)
+
+ # Load face detector
+ mtcnn = MTCNN(margin=14, keep_all=True, factor=0.7, device='cpu')
+
+ # Pipeline that samples frames from a video and crops the detected faces
+ class DetectionPipeline:
+     def __init__(self, detector, n_frames=None, batch_size=60, resize=None):
+         self.detector = detector
+         self.n_frames = n_frames
+         self.batch_size = batch_size
+         self.resize = resize
+
+     def __call__(self, filename):
+         v_cap = cv2.VideoCapture(filename)
+         v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         # Pick n_frames evenly spaced frames (or every frame if unset)
+         if self.n_frames is None:
+             sample = np.arange(0, v_len)
+         else:
+             sample = np.linspace(0, v_len - 1, self.n_frames).astype(int)
+
+         faces = []
+         frames = []
+         face2 = None  # most recent face crop; reused when detection fails
+
+         for j in range(v_len):
+             success = v_cap.grab()
+             if j in sample:
+                 success, frame = v_cap.retrieve()
+                 if not success:
+                     continue
+                 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+                 # frame is a numpy array here, so scale it with cv2.resize
+                 if self.resize is not None:
+                     frame = cv2.resize(frame, None, fx=self.resize, fy=self.resize)
+
+                 frames.append(frame)
+
+                 # When the batch is full (or the video ends), run detection
+                 if len(frames) % self.batch_size == 0 or j == sample[-1]:
+                     boxes, probs = self.detector.detect(frames)
+
+                     for i in range(len(frames)):
+                         if boxes[i] is None:
+                             # No face found: fall back to the previous crop
+                             if face2 is not None:
+                                 faces.append(face2)
+                             continue
+
+                         box = boxes[i][0].astype(int)
+                         frame = frames[i]
+                         face = frame[box[1]:box[3], box[0]:box[2]]
+
+                         if not face.any():
+                             if face2 is not None:
+                                 faces.append(face2)
+                             continue
+
+                         face2 = cv2.resize(face, (224, 224))
+                         faces.append(face2)
+
+                     frames = []
+
+         v_cap.release()
+
+         return faces
+
+ detection_pipeline = DetectionPipeline(detector=mtcnn, n_frames=20, batch_size=60)
+
+ model = tf.keras.models.load_model("./Detecto-DeepFake_Video_Detector/p1")
+
+ def deepfakespredict(input_video):
+     faces = detection_pipeline(input_video)
+     total = 0
+     real = 0
+     fake = 0
+
+     for face in faces:
+         face2 = face / 255
+         pred = model.predict(np.expand_dims(face2, axis=0))[0]
+         total += 1
+         pred2 = pred[1]  # probability of the "fake" class
+
+         if pred2 > 0.5:
+             fake += 1
+         else:
+             real += 1
+
+     fake_ratio = fake / total
+     text2 = f"Deepfakes Confidence: {fake_ratio * 100:.2f}%"
+
+     if fake_ratio >= 0.5:
+         text = "The video is FAKE."
+     else:
+         text = "The video is REAL."
+
+     # Stitch the face crops into a GIF, then convert it to MP4 for display
+     face_frames = []
+     for face in faces:
+         face_frame = Image.fromarray(face.astype('uint8'), 'RGB')
+         face_frames.append(face_frame)
+
+     face_frames[0].save('results.gif', save_all=True, append_images=face_frames[1:], duration=250, loop=100)
+     clip = mp.VideoFileClip("results.gif")
+     clip.write_videofile("video.mp4")
+
+     return text, text2, "video.mp4"
+
+ iface = gr.Interface(
+     fn=deepfakespredict,
+     inputs=gr.Video(),
+     outputs=[
+         gr.Text(label="Detection Result"),
+         gr.Text(label="Confidence"),
+         gr.File(label="Result Video")
+     ],
+     live=True,
+     title="EfficientNetV2 Deepfakes Video Detector",
+     description="This is a demo implementation of an EfficientNetV2 deepfakes image detector.",
+     examples=[
+         ['./Detecto-DeepFake_Video_Detector/Video1-fake-1-ff.mp4'],
+         ['./Detecto-DeepFake_Video_Detector/Video6-real-1-ff.mp4'],
+         ['./Detecto-DeepFake_Video_Detector/Video3-fake-3-ff.mp4'],
+         ['./Detecto-DeepFake_Video_Detector/Video8-real-3-ff.mp4'],
+         ['./Detecto-DeepFake_Video_Detector/real-1.mp4'],
+         ['./Detecto-DeepFake_Video_Detector/fake-1.mp4']
+     ]
+ )
+
+ @app.route('/')
+ def index():
+     # Gradio serves its own UI (by default at http://127.0.0.1:7860);
+     # gr.Interface has no .ui() method, so redirect the Flask root there.
+     return redirect("http://127.0.0.1:7860")
+
+ if __name__ == '__main__':
+     # Start Gradio without blocking, then run the Flask dev server.
+     iface.launch(share=True, prevent_thread_lock=True)
+     app.run(debug=True)
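As a quick sanity check outside the Flask/Gradio wrapper, `deepfakespredict` can be called directly. A minimal sketch, assuming the `p1` model folder and the example clips exist at the paths used above:

```python
# Run the detector on one example clip from this commit.
verdict, confidence, out_path = deepfakespredict("./Detecto-DeepFake_Video_Detector/real-1.mp4")
print(verdict)     # e.g. "The video is REAL."
print(confidence)  # e.g. "Deepfakes Confidence: 12.50%"
print(out_path)    # "video.mp4", the stitched face-crop sequence
```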
app.py ADDED
@@ -0,0 +1,213 @@
+ import gradio as gr
+ import cv2
+ import numpy as np
+ import tensorflow as tf
+ import tensorflow_addons
+
+ from facenet_pytorch import MTCNN
+ from PIL import Image
+ import moviepy.editor as mp
+ import os
+ import zipfile
+
+ # local_zip = "FINAL-EFFICIENTNETV2-B0.zip"
+ # zip_ref = zipfile.ZipFile(local_zip, 'r')
+ # zip_ref.extractall('FINAL-EFFICIENTNETV2-B0')
+ # zip_ref.close()
+
+ # Load face detector
+ mtcnn = MTCNN(margin=14, keep_all=True, factor=0.7, device='cpu')
+
+ # Face detection pipeline. Reference: (Timesler, 2020);
+ # source: https://www.kaggle.com/timesler/facial-recognition-model-in-pytorch
+ class DetectionPipeline:
+     """Pipeline class for detecting faces in the frames of a video file."""
+
+     def __init__(self, detector, n_frames=None, batch_size=60, resize=None):
+         """Constructor for DetectionPipeline class.
+
+         Keyword Arguments:
+             n_frames {int} -- Total number of frames to load. These will be evenly spaced
+                 throughout the video. If not specified (i.e., None), all frames will be loaded.
+                 (default: {None})
+             batch_size {int} -- Batch size to use with the MTCNN face detector. (default: {60})
+             resize {float} -- Fraction by which to resize frames from the original prior to face
+                 detection. A value less than 1 results in downsampling; a value greater than
+                 1 results in upsampling. (default: {None})
+         """
+         self.detector = detector
+         self.n_frames = n_frames
+         self.batch_size = batch_size
+         self.resize = resize
+
+     def __call__(self, filename):
+         """Load frames from an MP4 video and detect faces.
+
+         Arguments:
+             filename {str} -- Path to video.
+         """
+         # Create video reader and find length
+         v_cap = cv2.VideoCapture(filename)
+         v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         # Pick 'n_frames' evenly spaced frames to sample
+         if self.n_frames is None:
+             sample = np.arange(0, v_len)
+         else:
+             sample = np.linspace(0, v_len - 1, self.n_frames).astype(int)
+
+         # Loop through frames
+         faces = []
+         frames = []
+         face2 = None  # most recent face crop; reused when detection fails
+         for j in range(v_len):
+             success = v_cap.grab()
+             if j in sample:
+                 # Load frame
+                 success, frame = v_cap.retrieve()
+                 if not success:
+                     continue
+                 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+                 # Resize frame to desired size (cv2.resize, since frame is a numpy array)
+                 if self.resize is not None:
+                     frame = cv2.resize(frame, None, fx=self.resize, fy=self.resize)
+                 frames.append(frame)
+
+                 # When batch is full, detect faces and reset frame list
+                 if len(frames) % self.batch_size == 0 or j == sample[-1]:
+                     boxes, probs = self.detector.detect(frames)
+
+                     for i in range(len(frames)):
+                         if boxes[i] is None:
+                             # Append the previous face crop if no face is detected
+                             if face2 is not None:
+                                 faces.append(face2)
+                             continue
+
+                         box = boxes[i][0].astype(int)
+                         frame = frames[i]
+                         face = frame[box[1]:box[3], box[0]:box[2]]
+
+                         if not face.any():
+                             if face2 is not None:
+                                 faces.append(face2)
+                             continue
+
+                         face2 = cv2.resize(face, (224, 224))
+                         faces.append(face2)
+
+                     frames = []
+
+         v_cap.release()
+
+         return faces
+
+
+ detection_pipeline = DetectionPipeline(detector=mtcnn, n_frames=20, batch_size=60)
+
+ model = tf.keras.models.load_model("./Detecto-DeepFake_Video_Detector/p1")
+
+
+ def deepfakespredict(input_video):
+     faces = detection_pipeline(input_video)
+
+     total = 0
+     real = 0
+     fake = 0
+
+     for face in faces:
+         face2 = face / 255
+         pred = model.predict(np.expand_dims(face2, axis=0))[0]
+         total += 1
+
+         pred2 = pred[1]  # probability of the "fake" class
+
+         if pred2 > 0.5:
+             fake += 1
+         else:
+             real += 1
+
+     fake_ratio = fake / total
+
+     text2 = f"Deepfakes Confidence: {fake_ratio * 100:.2f}%"
+
+     if fake_ratio >= 0.5:
+         text = "The video is FAKE."
+     else:
+         text = "The video is REAL."
+
+     face_frames = []
+
+     for face in faces:
+         face_frame = Image.fromarray(face.astype('uint8'), 'RGB')
+         face_frames.append(face_frame)
+
+     # Stitch the face crops into a GIF, then convert it to MP4 for display
+     face_frames[0].save('results.gif', save_all=True, append_images=face_frames[1:], duration=250, loop=100)
+     clip = mp.VideoFileClip("results.gif")
+     clip.write_videofile("video.mp4")
+
+     return text, text2, "video.mp4"
+
+
+ title = "EfficientNetV2 Deepfakes Video Detector"
+ description = (
+     "This is a demo implementation of an EfficientNetV2 deepfakes image detector applied frame by frame. "
+     "To use it, simply upload your video, or click one of the examples to load them. "
+     "This demo and model represent the Final Year Project titled "
+     "\"Achieving Face Swapped Deepfakes Detection Using EfficientNetV2\" by CS undergraduate Lee Sheng Yeh. "
+     "The examples were extracted from Celeb-DF (V2) (Li et al., 2020) and FaceForensics++ (Rossler et al., 2019); "
+     "full reference details are available in \"references.txt\". "
+     "The examples are used under fair use solely to demonstrate the model. "
+     "If any copyright is infringed, please contact the researcher via email: [email protected]."
+ )
+
+ examples = [
+     ['./Detecto-DeepFake_Video_Detector/Video1-fake-1-ff.mp4'],
+     ['./Detecto-DeepFake_Video_Detector/Video6-real-1-ff.mp4'],
+     ['./Detecto-DeepFake_Video_Detector/Video3-fake-3-ff.mp4'],
+     ['./Detecto-DeepFake_Video_Detector/Video8-real-3-ff.mp4'],
+     ['./Detecto-DeepFake_Video_Detector/real-1.mp4'],
+     ['./Detecto-DeepFake_Video_Detector/fake-1.mp4'],
+ ]
+
+ gr.Interface(deepfakespredict,
+              inputs=["video"],
+              outputs=["text", "text", gr.Video(label="Detected face sequence")],
+              title=title,
+              description=description,
+              examples=examples
+              ).launch()
+
+ # # Import the necessary module to interact with the Hugging Face Hub.
+ # from huggingface_hub import notebook_login
+
+ # # Perform a login to the Hugging Face Hub.
+ # notebook_login()
+
+ # # Import the HfApi class from the huggingface_hub library.
+ # from huggingface_hub import HfApi
+
+ # # Create an instance of the HfApi class.
+ # api = HfApi()
+
+ # # Define the repository ID for the model on the Hugging Face Hub.
+ # repo_id = "DarkVision/Deepfake_detection_video"
+
+ # try:
+ #     # Attempt to create the repository on the Hugging Face Hub.
+ #     api.create_repo(repo_id)
+ #     print(f"Repo {repo_id} created")
+ # except Exception:
+ #     # The repository already exists (or creation failed).
+ #     print(f"Repo {repo_id} already exists")
+
+ # # Upload the project folder to the Hugging Face Hub.
+ # api.upload_folder(
+ #     folder_path="Detecto-DeepFake_Video_Detector/",  # local folder to upload
+ #     path_in_repo=".",        # destination path inside the repository
+ #     repo_id=repo_id,         # target repository
+ #     repo_type="model",       # repository type
+ #     revision="main"          # branch name
+ # )
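For intuition on the sampling step shared by both scripts, `np.linspace(0, v_len - 1, n_frames)` spreads the inspected frames evenly across the clip. A standalone sketch with illustrative numbers:

```python
import numpy as np

v_len, n_frames = 300, 20  # 300-frame clip, n_frames=20 as configured above
sample = np.linspace(0, v_len - 1, n_frames).astype(int)
print(sample)  # [  0  15  31  47 ... 283 299], 20 evenly spaced frame indices
```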
fake-1.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14d58b019d1d2a2be3c8293654b00a5fe7c3912267885eb8a9d42cfde411f91f
+ size 1142692
p1/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8411f85bd22de246fee31adc6bbf0a60d403ac22d8f572154fd77eb866b8daf3
+ size 202114
p1/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca17aff86eeedbeab2ace0fc42296a1fe11352c6adb418f04f96c5a3607bd28a
+ size 10505251
p1/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6418ccca9c8b62339ccfae9e5e3aae785fbdeed31fa08af7207ad4f0fc94fbbf
+ size 23824720
p1/variables/variables.index ADDED
Binary file (21.2 kB)
packages.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
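For context: on Hugging Face Spaces, `packages.txt` lists Debian packages installed via apt at build time; `ffmpeg` backs moviepy's GIF-to-MP4 conversion, while `libsm6` and `libxext6` are runtime libraries OpenCV needs in a headless container.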
real-1.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c80effbdbdf7ea5b6b2fab02fa8d4b5dde64aef46c91d6c6911a01e6d03673a4
+ size 1152146