victorisgeek
committed
Update app.py
app.py
CHANGED
@@ -189,21 +189,21 @@ def process(



-    yield "### \n
+    yield "### \n π Loading face analyser model...", *ui_before()
     load_face_analyser_model()

-    yield "### \n
+    yield "### \n π Loading face swapper model...", *ui_before()
     load_face_swapper_model()

     if face_enhancer_name != "NONE":
         if face_enhancer_name not in cv2_interpolations:
-            yield f"### \n
+            yield f"### \n πͺ Loading {face_enhancer_name} model...", *ui_before()
         FACE_ENHANCER = load_face_enhancer_model(name=face_enhancer_name, device=device)
     else:
         FACE_ENHANCER = None

     if enable_face_parser:
-        yield "### \n
+        yield "### \n 𧲠Loading face parsing model...", *ui_before()
         load_face_parser_model()

     includes = mask_regions_to_list(mask_includes)

@@ -221,7 +221,7 @@ def process(
     ## ------------------------------ CONTENT CHECK ------------------------------


-    yield "### \n
+    yield "### \n π‘ Analysing face data...", *ui_before()
     if condition != "Specific Face":
         source_data = source_path, age
     else:

@@ -237,7 +237,7 @@ def process(

     ## ------------------------------ SWAP FUNC ------------------------------

-    yield "### \n
+    yield "### \n βοΈ Generating faces...", *ui_before()
     preds = []
     matrs = []
     count = 0

@@ -251,13 +251,13 @@ def process(
         if USE_CUDA:
             image_grid = create_image_grid(batch_pred, size=128)
             PREVIEW = image_grid[:, :, ::-1]
-            yield f"### \n
+            yield f"### \n βοΈ Generating face Batch {count}", *ui_before()

     ## ------------------------------ FACE ENHANCEMENT ------------------------------

     generated_len = len(preds)
     if face_enhancer_name != "NONE":
-        yield f"### \n
+        yield f"### \n π Upscaling faces with {face_enhancer_name}...", *ui_before()
         for idx, pred in tqdm(enumerate(preds), total=generated_len, desc=f"Upscaling with {face_enhancer_name}"):
             enhancer_model, enhancer_model_runner = FACE_ENHANCER
             pred = enhancer_model_runner(pred, enhancer_model)

@@ -267,7 +267,7 @@ def process(
     ## ------------------------------ FACE PARSING ------------------------------

     if enable_face_parser:
-        yield "### \n
+        yield "### \n ποΈ Face-parsing mask...", *ui_before()
         masks = []
         count = 0
         for batch_mask in get_parsed_mask(FACE_PARSER, preds, classes=includes, device=device, batch_size=BATCH_SIZE, softness=int(mask_soft_iterations)):

@@ -278,7 +278,7 @@ def process(
             if len(batch_mask) > 1:
                 image_grid = create_image_grid(batch_mask, size=128)
                 PREVIEW = image_grid[:, :, ::-1]
-                yield f"### \n
+                yield f"### \n βοΈ Face parsing Batch {count}", *ui_before()
         masks = np.concatenate(masks, axis=0) if len(masks) >= 1 else masks
     else:
         masks = [None] * generated_len

@@ -294,7 +294,7 @@ def process(

     ## ------------------------------ PASTE-BACK ------------------------------

-    yield "### \n
+    yield "### \n π οΈ Pasting back...", *ui_before()
     def post_process(frame_idx, frame_img, split_preds, split_matrs, split_masks, enable_laplacian_blend, crop_mask, blur_amount, erode_amount):
         whole_img_path = frame_img
         whole_img = cv2.imread(whole_img_path)

@@ -350,7 +350,7 @@ def process(
     temp_path = os.path.join(output_path, output_name, "sequence")
     os.makedirs(temp_path, exist_ok=True)

-    yield "### \n
+    yield "### \n π½ Extracting video frames...", *ui_before()
     image_sequence = []
     cap = cv2.VideoCapture(video_path)
     curr_idx = 0

@@ -367,12 +367,12 @@ def process(
     for info_update in swap_process(image_sequence):
         yield info_update

-    yield "### \n
+    yield "### \n π Merging sequence...", *ui_before()
     output_video_path = os.path.join(output_path, output_name + ".mp4")
     merge_img_sequence_from_ref(video_path, image_sequence, output_video_path)

     if os.path.exists(temp_path) and not keep_output_sequence:
-        yield "### \n
+        yield "### \n π½ Removing temporary files...", *ui_before()
         shutil.rmtree(temp_path)

     WORKSPACE = output_path

@@ -490,7 +490,7 @@ def video_changed(video_path):


 def analyse_settings_changed(detect_condition, detection_size, detection_threshold):
-    yield "### \n
+    yield "### \n π‘ Applying new values..."
     global FACE_ANALYSER
     global DETECT_CONDITION
     DETECT_CONDITION = detect_condition

@@ -526,7 +526,7 @@ def slider_changed(show_frame, video_path, frame_index):


 def trim_and_reload(video_path, output_path, output_name, start_frame, stop_frame):
-    yield video_path, f"### \n
+    yield video_path, f"### \n π οΈ Trimming video frame {start_frame} to {stop_frame}..."
     try:
         output_path = os.path.join(output_path, output_name)
         trimmed_video = trim_video(video_path, output_path, start_frame, stop_frame)

@@ -543,12 +543,12 @@ footer{display:none !important}
 """

 with gr.Blocks(css=css) as interface:
-    gr.Markdown("#
+    gr.Markdown("# π§Έ Deepfake Faceswap")
     gr.Markdown("### Face swap app based on insightface inswapper.")
     with gr.Row():
         with gr.Row():
             with gr.Column(scale=0.4):
-                with gr.Tab("
+                with gr.Tab("βοΈ Swap Condition"):
                     swap_option = gr.Dropdown(
                         swap_options_list,
                         info="Choose which face or faces in the target image to swap.",

@@ -561,7 +561,7 @@ with gr.Blocks(css=css) as interface:
                         value=25, label="Value", interactive=True, visible=False
                     )

-                with gr.Tab("
+                with gr.Tab("𧫠Detection Settings"):
                     detect_condition_dropdown = gr.Dropdown(
                         detect_conditions,
                         label="Condition",

@@ -579,7 +579,7 @@ with gr.Blocks(css=css) as interface:
                     )
                     apply_detection_settings = gr.Button("Apply settings")

-                with gr.Tab("
+                with gr.Tab("β»οΈ Output Settings"):
                     output_directory = gr.Text(
                         label="Output Directory",
                         value=DEF_OUTPUT_PATH,

@@ -592,7 +592,7 @@ with gr.Blocks(css=css) as interface:
                         label="Keep output sequence", value=False, interactive=True
                     )

-                with gr.Tab("
+                with gr.Tab("β’οΈ Other Settings"):
                     face_scale = gr.Slider(
                         label="Face Scale",
                         minimum=0,

@@ -707,7 +707,7 @@ with gr.Blocks(css=css) as interface:
                     video_input = gr.Video(
                         label="Target Video", interactive=True
                     )
-                    with gr.Accordion("
+                    with gr.Accordion("π¨ Trim video", open=False):
                         with gr.Column():
                             with gr.Row():
                                 set_slider_range_btn = gr.Button(

@@ -754,8 +754,8 @@ with gr.Blocks(css=css) as interface:
                 info = gr.Markdown(value="...")

                 with gr.Row():
-                    swap_button = gr.Button("
-                    cancel_button = gr.Button("
+                    swap_button = gr.Button("π― Swap", variant="primary")
+                    cancel_button = gr.Button("β Cancel")

                 preview_image = gr.Image(label="Output", interactive=False)
                 preview_video = gr.Video(

@@ -764,28 +764,28 @@ with gr.Blocks(css=css) as interface:

                 with gr.Row():
                     output_directory_button = gr.Button(
-                        "
+                        "π", interactive=False, visible=False
                     )
                     output_video_button = gr.Button(
-                        "
+                        "π½οΈ", interactive=False, visible=False
                     )

     with gr.Box():
         with gr.Row():
             gr.Markdown(
-                "### [
+                "### [π Sponsor]"
             )
             gr.Markdown(
-                "### [
+                "### [π₯οΈ Source code](https://huggingface.co/spaces/victorisgeek/SwapFace2Pon)"
            )
             gr.Markdown(
-                "### [
+                "### [ 𧩠Playground](https://huggingface.co/spaces/victorisgeek/SwapFace2Pon)"
             )
             gr.Markdown(
-                "### [
+                "### [πΈ Run in Colab](https://colab.research.google.com/github/victorgeel/FaceSwapNoNfsw/blob/main/SwapFace.ipynb)"
             )
             gr.Markdown(
-                "### [π€
+                "### [π€ Modified Version](https://github.com/victorgeel/FaceSwapNoNfsw)"
             )

     ## ------------------------------ GRADIO EVENTS ------------------------------
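The change set above leans on Gradio's support for generator event handlers: process, analyse_settings_changed, and trim_and_reload are generators, and each yield "### ..." statement pushes a fresh status line (plus the preview outputs supplied by ui_before()) into the interface while the job keeps running. The listing below is a minimal sketch of that pattern in isolation, not the Space's actual code; fake_process, the "Batches" slider, and the blank preview frame are illustrative stand-ins.

import time

import numpy as np
import gradio as gr


def fake_process(steps):
    # Stand-in preview frame; the real app yields swapped-face grids here.
    preview = np.zeros((128, 128, 3), dtype=np.uint8)
    for i in range(int(steps)):
        # Each yield streams (status markdown, preview image) to the UI
        # while the remaining batches are still being processed.
        yield f"### \n Working on batch {i + 1} of {int(steps)}...", preview
        time.sleep(0.5)
    yield "### \n Done.", preview


with gr.Blocks() as demo:
    steps = gr.Slider(1, 10, value=3, step=1, label="Batches")
    info = gr.Markdown(value="...")
    preview_image = gr.Image(label="Output", interactive=False)
    run_button = gr.Button("Run")
    # A generator handler makes Gradio push every yielded tuple to the
    # listed outputs in order, giving live progress without extra threads.
    run_button.click(fake_process, inputs=[steps], outputs=[info, preview_image])

if __name__ == "__main__":
    demo.launch()

Because the handler yields rather than returns, the browser receives each status string and preview as soon as it is produced, which is how the app can report per-batch progress during swapping, enhancement, and face parsing.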