# heatmap/deepfake_detector.py
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from skimage.metrics import structural_similarity as ssim

class DeepfakeDetector:
    def __init__(self, model_path=None):
        """
        Initialize the deepfake detector with the Nvidia AI model.

        Args:
            model_path: Path to the pre-trained Nvidia AI model
        """
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = self._load_model(model_path)
        # Standard ImageNet preprocessing, reserved for model-based inference
        self.transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def _load_model(self, model_path):
        """
        Load the Nvidia AI model for deepfake detection.

        Args:
            model_path: Path to the pre-trained model

        Returns:
            Loaded model, or None if no model is available
        """
        # This is a placeholder for the actual model loading code. In a real
        # implementation, you would load the specific Nvidia AI model here;
        # a generic torch.load is used as a stand-in.
        if model_path:
            try:
                model = torch.load(model_path, map_location=self.device)
                if hasattr(model, 'eval'):
                    model.eval()  # switch to inference mode
                print(f"Model loaded from {model_path}")
                return model
            except Exception as e:
                print(f"Error loading model: {e}")
                return None
        else:
            print("No model path provided, using default detection methods")
            return None

    def calculate_smi(self, image1, image2):
        """
        Calculate the Structural Matching Index (SMI) between two images.

        Args:
            image1: First image (numpy array or path)
            image2: Second image (numpy array or path)

        Returns:
            SMI score (float; identical images score 1.0)
        """
        # Convert paths to images if needed
        if isinstance(image1, str):
            image1 = cv2.imread(image1)
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
        if isinstance(image2, str):
            image2 = cv2.imread(image2)
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)

        # Ensure images are the same size
        if image1.shape != image2.shape:
            image2 = cv2.resize(image2, (image1.shape[1], image1.shape[0]))

        # Calculate SMI on grayscale versions of the images. This is a
        # simplified version: SSIM serves as a placeholder for the specific
        # SMI calculation from the Nvidia AI model.
        gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
        gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)
        smi_score, _ = ssim(gray1, gray2, full=True)
        return smi_score

    def generate_difference_image(self, image1, image2):
        """
        Generate a difference image highlighting areas of discrepancy.

        Args:
            image1: First image (numpy array or path)
            image2: Second image (numpy array or path)

        Returns:
            Difference image (numpy array)
        """
        # Convert paths to images if needed
        if isinstance(image1, str):
            image1 = cv2.imread(image1)
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
        if isinstance(image2, str):
            image2 = cv2.imread(image2)
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)

        # Ensure images are the same size
        if image1.shape != image2.shape:
            image2 = cv2.resize(image2, (image1.shape[1], image1.shape[0]))

        # Convert to grayscale and compute the absolute per-pixel difference
        gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
        gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)
        diff = cv2.absdiff(gray1, gray2)

        # Stretch to the full 0-255 range for better visualization
        diff_normalized = cv2.normalize(diff, None, 0, 255, cv2.NORM_MINMAX)
        return diff_normalized

    def apply_threshold(self, diff_image, threshold=30):
        """
        Apply a threshold to the difference image to isolate significant differences.

        Args:
            diff_image: Difference image (numpy array)
            threshold: Threshold value (0-255)

        Returns:
            Binary thresholded image (numpy array)
        """
        _, thresh = cv2.threshold(diff_image, threshold, 255, cv2.THRESH_BINARY)
        return thresh

    def detect_bounding_boxes(self, thresh_image, min_area=100):
        """
        Detect bounding boxes around areas of significant difference.

        Args:
            thresh_image: Thresholded image (numpy array)
            min_area: Minimum contour area to consider

        Returns:
            List of bounding boxes (x, y, w, h)
        """
        # Find external contours in the thresholded image (OpenCV 4.x API)
        contours, _ = cv2.findContours(thresh_image.astype(np.uint8),
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)

        # Filter contours by area and collect their bounding boxes
        bounding_boxes = []
        for contour in contours:
            if cv2.contourArea(contour) >= min_area:
                x, y, w, h = cv2.boundingRect(contour)
                bounding_boxes.append((x, y, w, h))
        return bounding_boxes

    def draw_bounding_boxes(self, image, bounding_boxes, color=(0, 255, 0), thickness=2):
        """
        Draw bounding boxes on an image.

        Args:
            image: Image to draw on (numpy array)
            bounding_boxes: List of bounding boxes (x, y, w, h)
            color: Box color (R, G, B), since the pipeline works in RGB order
            thickness: Line thickness

        Returns:
            Copy of the image with bounding boxes drawn
        """
        # Make a copy of the image to avoid modifying the original
        result = image.copy()
        for (x, y, w, h) in bounding_boxes:
            cv2.rectangle(result, (x, y), (x + w, y + h), color, thickness)
        return result

    def process_image_pair(self, image1, image2, threshold=30, min_area=100):
        """
        Process a pair of images through the complete verification pipeline.

        Args:
            image1: First image (numpy array or path)
            image2: Second image (numpy array or path)
            threshold: Threshold value for difference detection
            min_area: Minimum area for bounding box detection

        Returns:
            Dictionary containing:
                - smi_score: Structural Matching Index
                - difference_image: Difference visualization
                - threshold_image: Thresholded difference image
                - bounding_boxes: List of detected bounding boxes
                - annotated_image: First image with bounding boxes drawn
        """
        # Load images if paths are provided, failing loudly on unreadable files
        if isinstance(image1, str):
            img1 = cv2.imread(image1)
            if img1 is None:
                raise ValueError(f"Could not read image: {image1}")
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        else:
            img1 = image1.copy()
        if isinstance(image2, str):
            img2 = cv2.imread(image2)
            if img2 is None:
                raise ValueError(f"Could not read image: {image2}")
            img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
        else:
            img2 = image2.copy()

        # Run each stage of the pipeline in order
        smi_score = self.calculate_smi(img1, img2)
        diff_image = self.generate_difference_image(img1, img2)
        thresh_image = self.apply_threshold(diff_image, threshold)
        bounding_boxes = self.detect_bounding_boxes(thresh_image, min_area)
        annotated_image = self.draw_bounding_boxes(img1, bounding_boxes)

        return {
            'smi_score': smi_score,
            'difference_image': diff_image,
            'threshold_image': thresh_image,
            'bounding_boxes': bounding_boxes,
            'annotated_image': annotated_image
        }
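

# Minimal usage sketch (not part of the original pipeline): runs the full
# verification pipeline end to end and saves the visualizations. The file
# paths below are hypothetical placeholders; substitute real reference and
# suspect images to try it.
if __name__ == "__main__":
    detector = DeepfakeDetector()  # no model path: falls back to default methods

    # Hypothetical input paths, for illustration only
    reference_path = "reference.jpg"
    suspect_path = "suspect.jpg"

    results = detector.process_image_pair(
        reference_path,
        suspect_path,
        threshold=30,   # pixel-difference cutoff (0-255)
        min_area=100,   # ignore difference regions smaller than this
    )

    print(f"SMI score: {results['smi_score']:.4f}")
    print(f"Regions flagged: {len(results['bounding_boxes'])}")

    # Save outputs; cv2.imwrite expects BGR, so convert the RGB annotation back
    cv2.imwrite("difference.png", results['difference_image'])
    cv2.imwrite("annotated.png",
                cv2.cvtColor(results['annotated_image'], cv2.COLOR_RGB2BGR))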