import os
import gradio as gr
from transformers import pipeline
import cv2

# Load a Hugging Face gender classification model (ProjectPersonal/GenderClassifier)
gender_classifier = pipeline("image-classification", model="ProjectPersonal/GenderClassifier")

# Placeholder for a face-swapping model. The First Order Motion Model is not
# distributed as a transformers "image-to-image" checkpoint, so this line will
# not load a working face swapper as written; replace it with a real face-swap
# implementation (see the swap_frame sketch after swap_faces below).
face_swap_model = pipeline("image-to-image", model="first-order-motion-model/first-order-motion-model")
def detect_gender(image_path):
    # Use the gender classifier to detect gender from the image
    result = gender_classifier(image_path)
    # Return the top predicted label, e.g. "male" or "female"
    return result[0]['label'].lower()
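
# Note (illustrative, not part of the original Space): a transformers
# image-classification pipeline returns a list of dicts sorted by score, e.g.
# [{'label': 'male', 'score': 0.98}, {'label': 'female', 'score': 0.02}],
# which is why detect_gender() reads result[0]['label']. The exact label
# strings depend on the ProjectPersonal/GenderClassifier checkpoint.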
def swap_faces(image, video, selected_gender):
    # Persist the uploaded reference image; Gradio passes the video to this
    # function as a file path, so it can be used directly.
    image_path = "uploaded_image.jpg"
    output_path = "swapped_video.mp4"
    image.save(image_path)
    video_path = video

    # Detect gender of the reference image
    detected_gender = detect_gender(image_path)

    # Proceed with the face swap only if "all" is selected or the detected
    # gender matches the selected gender
    if selected_gender == "all" or detected_gender == selected_gender:
        video_cap = cv2.VideoCapture(video_path)
        frame_rate = video_cap.get(cv2.CAP_PROP_FPS)
        width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(output_path, fourcc, frame_rate, (width, height))

        while True:
            ret, frame = video_cap.read()
            if not ret:
                break
            # Placeholder: apply the face swap to this frame. Replace this call
            # with a real face-swap function that takes a BGR frame plus the
            # reference image and returns a BGR frame of the same size
            # (see the swap_frame sketch below).
            swapped_frame = face_swap_model(frame, reference_image=image_path)
            output_video.write(swapped_frame)

        video_cap.release()
        output_video.release()
    else:
        # Gender doesn't match: return the original video without face swap
        output_path = video_path

    return output_path
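
# --- Hedged sketch of a concrete per-frame face swap -------------------------
# The pipeline call above is only a placeholder. One common concrete choice is
# insightface's "inswapper" model. Everything below is an assumption, not part
# of the original Space: it requires the optional insightface and onnxruntime
# packages, and the inswapper_128.onnx weights downloaded to the working
# directory. swap_frame() could then replace the face_swap_model(...) call.
import insightface
from insightface.app import FaceAnalysis

face_analyzer = FaceAnalysis(name="buffalo_l")
face_analyzer.prepare(ctx_id=0, det_size=(640, 640))
face_swapper = insightface.model_zoo.get_model("inswapper_128.onnx")

def swap_frame(frame_bgr, reference_bgr):
    # Swap every face found in frame_bgr with the first face in reference_bgr
    source_faces = face_analyzer.get(reference_bgr)
    if not source_faces:
        return frame_bgr  # no face in the reference image; leave the frame unchanged
    source_face = source_faces[0]
    result = frame_bgr
    for target_face in face_analyzer.get(frame_bgr):
        result = face_swapper.get(result, target_face, source_face, paste_back=True)
    return result
# Usage inside the loop above (reference loaded once via cv2.imread(image_path)):
#     swapped_frame = swap_frame(frame, reference_bgr)
# ------------------------------------------------------------------------------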
# Gradio UI
iface = gr.Interface(
    fn=swap_faces,
    inputs=[
        gr.Image(type="pil", label="Upload Reference Image"),
        gr.Video(label="Upload Video"),  # passed to swap_faces as a file path
        gr.Dropdown(choices=["all", "male", "female"], label="Select Gender")
    ],
    outputs=gr.Video(label="Face Swapped Video"),
    title="Video Face Swap",
    description="Upload a reference image and a video to swap faces based on gender selection."
)

iface.launch()