Spaces:
Runtime error
Uploaded app.py
app.py (ADDED)
@@ -0,0 +1,71 @@
import os
import gradio as gr
from transformers import pipeline
import cv2

# Load Hugging Face gender classifier model (ProjectPersonal/GenderClassifier)
gender_classifier = pipeline("image-classification", model="ProjectPersonal/GenderClassifier")
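
# For reference, an image-classification pipeline call returns a ranked list
# of dicts, e.g. [{'label': 'male', 'score': 0.98}, ...]; the exact label
# strings ("male"/"female" is an assumption here) come from this checkpoint's
# config, so verify them against the model card.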

# Face swap model: senhan007/DeepFaceLab hosts DeepFaceLab tooling releases,
# not a transformers-compatible image-to-image checkpoint, so this pipeline()
# call raises at startup and is the likely cause of this Space's runtime
# error. The per-frame swap below is still a placeholder, so the load is
# disabled rather than replaced.
# face_swap_model = pipeline("image-to-image", model="senhan007/DeepFaceLab")
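
# A workable alternative (an assumption, not part of this commit): the
# insightface "inswapper" ONNX model is a common choice for frame-by-frame
# face swapping. A minimal sketch, assuming insightface and the
# inswapper_128.onnx weights are available locally:
#
#   import insightface
#   from insightface.app import FaceAnalysis
#
#   face_app = FaceAnalysis(name="buffalo_l")
#   face_app.prepare(ctx_id=0, det_size=(640, 640))
#   swapper = insightface.model_zoo.get_model("inswapper_128.onnx")
#
#   source_face = face_app.get(cv2.imread("uploaded_image.jpg"))[0]
#   # then, for each video frame:
#   #   for face in face_app.get(frame):
#   #       frame = swapper.get(frame, face, source_face, paste_back=True)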

def detect_gender(image_path):
    # Use the gender classifier to detect gender from the reference image
    result = gender_classifier(image_path)
    # Return the top predicted gender label, lowercased ("male" or "female")
    return result[0]['label'].lower()
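
# Note: transformers image pipelines accept a file path, URL, or PIL image,
# so detect_gender("uploaded_image.jpg") and detect_gender(pil_image) both work.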

def swap_faces(image, video, selected_gender):
    # Persist the reference image; gr.Image(type="pil") hands the function a
    # PIL image. Convert to RGB so the JPEG save also works for RGBA uploads.
    image_path = "uploaded_image.jpg"
    image.convert("RGB").save(image_path)
    # gr.Video passes the upload as a filepath string, so use it directly
    # (the original video.save(...) call fails because str has no .save())
    video_path = video
    output_path = "swapped_video.mp4"

    # Detect the gender of the reference image
    detected_gender = detect_gender(image_path)

    # Proceed with the face swap if "all" is selected or the detected gender
    # matches the selection
    if selected_gender == "all" or detected_gender == selected_gender:
        video_cap = cv2.VideoCapture(video_path)
        frame_rate = video_cap.get(cv2.CAP_PROP_FPS)
        width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(output_path, fourcc, frame_rate, (width, height))

        while True:
            ret, frame = video_cap.read()
            if not ret:
                break

            # Placeholder: pass the frame through unchanged; real per-frame
            # inference (e.g. the inswapper sketch above) belongs here
            swapped_frame = frame

            output_video.write(swapped_frame)

        video_cap.release()
        output_video.release()
    else:
        # Gender doesn't match: return the original video with no swap applied
        output_path = video_path

    return output_path
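
# Caveat: the 'mp4v' fourcc writes MPEG-4 Part 2 video, which most browsers
# will not play inline; if the swapped video shows up blank in the Gradio
# player, re-encoding the file to H.264 (e.g. via ffmpeg) is the usual fix.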

# Gradio UI
iface = gr.Interface(
    fn=swap_faces,
    inputs=[
        gr.Image(type="pil", label="Upload Reference Image"),
        # gr.Video takes no `type` parameter in current Gradio (the original
        # type="file" is rejected); it supplies the upload as a filepath
        gr.Video(label="Upload Video"),
        gr.Dropdown(choices=["all", "male", "female"], label="Select Gender")
    ],
    outputs=gr.Video(label="Face Swapped Video"),
    title="Video Face Swap",
    description="Upload a reference image and a video to swap faces based on gender selection."
)

iface.launch()
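
For the Space to build, the imports above also need a requirements.txt next to
app.py. A minimal sketch (package set inferred from the imports; torch assumed
as the transformers backend, and the headless OpenCV build assumed for a
server environment):

    gradio
    transformers
    torch
    opencv-python-headless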