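# app.py — Gradio demo for deepfake detection with a CNN-LSTM model
# (Xception backbone wrapped in TimeDistributed layers, followed by an LSTM head).
# Assumption: the Space's requirements.txt includes gradio, tensorflow,
# opencv-python, and numpy.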
import gradio as gr
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.applications import Xception
import cv2
import numpy as np

def build_deepfake_detection_model():
    """Build the CNN-LSTM classifier: an Xception feature extractor applied
    per frame, followed by an LSTM and a sigmoid output."""
    # Xception backbone pretrained on ImageNet; only the last 50 layers are fine-tuned.
    cnn_base = Xception(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
    cnn_base.trainable = True
    for layer in cnn_base.layers[:-50]:
        layer.trainable = False

    # Input is a sequence of frames: (time_steps, height, width, channels).
    # time_steps is fixed at 1 because inference below runs frame by frame.
    input_layer = layers.Input(shape=(1, 128, 128, 3))
    x = layers.TimeDistributed(cnn_base)(input_layer)
    x = layers.TimeDistributed(layers.GlobalAveragePooling2D())(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.5)(x)
    x = layers.LSTM(128)(x)
    x = layers.Dropout(0.5)(x)
    # Single sigmoid unit: one probability score per clip.
    output = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=input_layer, outputs=output)
    return model

# Load the model (you'll need to upload your model weights to Hugging Face)
model = build_deepfake_detection_model()
model.load_weights('dfdc_cnn_lstm_model_finetuned.keras')

def process_video(video_path):
    """Read a video with OpenCV and return its frames as a float32 array
    of shape (num_frames, 128, 128, 3), scaled to [0, 1]."""
    cap = cv2.VideoCapture(video_path)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV returns frames in BGR order; no color conversion is applied here,
        # assuming the model was trained on frames preprocessed the same way.
        frame = cv2.resize(frame, (128, 128))
        frame = frame.astype('float32') / 255.0
        frames.append(frame)
    cap.release()
    return np.array(frames)

def predict_deepfake(video):
    frames = process_video(video)
    # Guard against unreadable or empty videos (avoids np.mean over an empty list).
    if len(frames) == 0:
        return "Could not read any frames from the video."

    predictions = []
    for frame in frames:
        frame = np.expand_dims(frame, axis=0)  # Add time dimension -> (1, 128, 128, 3)
        frame = np.expand_dims(frame, axis=0)  # Add batch dimension -> (1, 1, 128, 128, 3)
        prediction = model.predict(frame, verbose=0)
        predictions.append(prediction[0][0])

    # Average the per-frame scores and map the result to a label.
    avg_prediction = np.mean(predictions)
    result = "Real" if avg_prediction > 0.5 else "Fake"
    confidence = avg_prediction if result == "Real" else 1 - avg_prediction

    return f"{result} with {confidence:.2%} confidence"

iface = gr.Interface(
    fn=predict_deepfake,
    inputs=gr.Video(),
    outputs="text",
    title="Deepfake Detection",
    description="Upload a video to check whether it is a deepfake."
)

iface.launch()