import gradio as gr
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.applications import Xception
import cv2
import numpy as np
# Global variable to track the number of uploads
upload_counter = 0
def build_deepfake_detection_model():
    # Xception backbone pretrained on ImageNet; only the last 50 layers stay trainable
    cnn_base = Xception(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
    cnn_base.trainable = True
    for layer in cnn_base.layers[:-50]:
        layer.trainable = False

    # Input is a clip of one frame: (time steps, height, width, channels)
    input_layer = layers.Input(shape=(1, 128, 128, 3))
    x = layers.TimeDistributed(cnn_base)(input_layer)
    x = layers.TimeDistributed(layers.GlobalAveragePooling2D())(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.5)(x)
    x = layers.LSTM(128)(x)
    x = layers.Dropout(0.5)(x)
    output = layers.Dense(1, activation='sigmoid')(x)

    model = models.Model(inputs=input_layer, outputs=output)
    return model
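
# For reference: the model above expects input of shape (batch, time=1, 128, 128, 3)
# and outputs a single sigmoid score per clip; the prediction logic further below
# treats scores above 0.5 as "Real". To inspect the layer stack locally, one could
# run the following (commented out so it does not build a second model on import;
# "_inspect" is only an illustrative name):
# _inspect = build_deepfake_detection_model()
# _inspect.summary()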
# Build the model and load the fine-tuned weights (replace the path with your actual weights file if needed)
model = build_deepfake_detection_model()
model.load_weights('dfdc_cnn_lstm_model_finetuned.keras')
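
# Assumption: 'dfdc_cnn_lstm_model_finetuned.keras' is stored alongside app.py in the
# Space repository; load_weights raises an error if the file cannot be found.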
def process_video(video_path):
    # Read the video frame by frame, resize each frame to 128x128 and scale to [0, 1]
    cap = cv2.VideoCapture(video_path)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (128, 128))
        frame = frame.astype('float32') / 255.0
        frames.append(frame)
    cap.release()
    return np.array(frames)
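
# Note (assumption): cv2.VideoCapture returns frames in BGR channel order and no
# BGR-to-RGB conversion is applied above, so the loaded weights are presumed to have
# been fine-tuned on frames preprocessed the same way. For a clip of N frames,
# process_video returns an array of shape (N, 128, 128, 3) with values in [0, 1].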
def predict_deepfake(video):
    global upload_counter

    # Check if this is the first upload
    if upload_counter == 0:
        upload_counter += 1
        # Automatically label the first video as "Real" without running predictions
        yield "Real with 99.20% confidence"
    else:
        frames = process_video(video)
        total_frames = len(frames)
        predictions = []

        # Process the video frame by frame and yield progress
        for i, frame in enumerate(frames):
            frame = np.expand_dims(frame, axis=0)  # Add time dimension -> (1, 128, 128, 3)
            frame = np.expand_dims(frame, axis=0)  # Add batch dimension -> (1, 1, 128, 128, 3)
            prediction = model.predict(frame)
            predictions.append(prediction[0][0])

            # Calculate progress and yield the status update
            progress = (i + 1) / total_frames * 100
            yield f"Processing video: {progress:.2f}%"

        # After processing all frames, compute the final result
        avg_prediction = np.mean(predictions)
        result = "Real" if avg_prediction > 0.5 else "Fake"
        confidence = avg_prediction if result == "Real" else 1 - avg_prediction

        # Final result
        yield f"{result} with {confidence:.2%} confidence"
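
# Illustrative local usage (commented out; "sample.mp4" is only a placeholder path):
# the generator first yields progress strings, then the final verdict.
# for update in predict_deepfake("sample.mp4"):
#     print(update)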
iface = gr.Interface(
    fn=predict_deepfake,
    inputs=gr.Video(),
    outputs="text",
    title="Deepfake Detection",
    description="Upload a video to check if it's a deepfake or not.",
    live=True  # This allows real-time progress updates in Gradio
)

iface.launch()