KEEP-docker / app.py
fffiloni's picture
Update app.py
6c7dbc1 verified
raw
history blame
4.08 kB
import datetime
import os
import shlex
import subprocess
import sys

import gradio as gr
import torch
def run_command(command):
    """Execute a shell command string, exiting the process if it fails.

    Parameters
    ----------
    command : str
        The full command line to run (callers build it with f-strings),
        executed through the shell because it is a single string.

    Exits with status 1 when the command returns a non-zero code.
    """
    # BUG FIX: callers pass a plain string, so ' '.join(command) printed the
    # command with a space between every character; echo it verbatim instead.
    print(f"Running command: {command}")
    try:
        # shell=True is required: `command` is one string, not an argv list.
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as e:
        print(f"Error running command {command}: {e}")
        sys.exit(1)
def check_for_mp4_in_outputs(given_folder):
    """Return the path of the first ``.mp4`` file inside *given_folder*.

    Returns ``None`` when the folder does not exist or contains no
    ``.mp4`` files. "First" follows ``os.listdir`` order.
    """
    # Guard clause: a missing results folder simply means no output yet.
    if not os.path.exists(given_folder):
        return None
    # Scan directory entries and stop at the first video file found.
    for entry in os.listdir(given_folder):
        if entry.endswith('.mp4'):
            return os.path.join(given_folder, entry)
    return None
def infer(input_video, cropped_and_aligned):
    """Run KEEP video face super-resolution on *input_video*.

    Parameters
    ----------
    input_video : str
        Filesystem path of the uploaded video (from the Gradio Video input).
    cropped_and_aligned : bool
        True when the video already contains cropped & aligned faces
        (synthetic data): passes --has_aligned and skips box drawing and
        the RealESRGAN background upsampler.

    Returns
    -------
    str or None
        Path to the produced .mp4, or None if no output video was found.
    """
    torch.cuda.empty_cache()
    filepath = input_video
    # Timestamped folder keeps successive runs from overwriting each other.
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    output_folder_name = f"results_{timestamp}"

    # FIX: quote both paths — they are interpolated into a shell=True command
    # string, so an upload name containing spaces or shell metacharacters
    # would previously break (or inject into) the command line.
    quoted_in = shlex.quote(filepath)
    quoted_out = shlex.quote(output_folder_name)

    if cropped_and_aligned:
        run_command(f"{sys.executable} inference_keep.py -i={quoted_in} -o={quoted_out} --has_aligned --save_video -s=1")
    else:
        run_command(f"{sys.executable} inference_keep.py -i={quoted_in} -o={quoted_out} --draw_box --save_video -s=1 --bg_upsampler=realesrgan")
    torch.cuda.empty_cache()

    # inference_keep.py writes its results under <output>/<input basename>/
    # — presumably; inferred from the path join below, confirm against the
    # inference script.
    this_infer_folder = os.path.splitext(os.path.basename(filepath))[0]
    joined_path = os.path.join(output_folder_name, this_infer_folder)
    mp4_file_path = check_for_mp4_in_outputs(joined_path)
    print(f"RESULT: {mp4_file_path}")
    return mp4_file_path
# Created up-front (outside the layout) so gr.Examples can reference it as an
# output before its position in the right-hand column is declared via render().
result_video = gr.Video()

with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("# KEEP")
        gr.Markdown("## Kalman-Inspired Feature Propagation for Video Face Super-Resolution")
        # Project-page / paper badge links.
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href='https://jnjaby.github.io/projects/KEEP/'>
                <img src='https://img.shields.io/badge/Project-Page-Green'>
            </a>
            <a href='https://arxiv.org/abs/2408.05205'>
                <img src='https://img.shields.io/badge/Paper-Arxiv-red'>
            </a>
        </div>
        """)
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video")
                # Mirrors infer()'s cropped_and_aligned flag: True means the
                # clip already has cropped & aligned faces (synthetic data).
                is_cropped_and_aligned = gr.Checkbox(label="Synthetic data", info="Is your input video ready with cropped and aligned faces ?", value=False)
                submit_btn = gr.Button("Submit")
                # Synthetic examples pair with checkbox=True, real ones with
                # False. Lazy caching runs inference on first click only.
                gr.Examples(
                    examples = [
                        ["./assets/examples/synthetic_1.mp4", True],
                        ["./assets/examples/synthetic_2.mp4", True],
                        ["./assets/examples/synthetic_3.mp4", True],
                        ["./assets/examples/synthetic_4.mp4", True],
                        ["./assets/examples/real_1.mp4", False],
                        ["./assets/examples/real_2.mp4", False],
                        ["./assets/examples/real_3.mp4", False],
                        ["./assets/examples/real_4.mp4", False]
                    ],
                    fn = infer,
                    inputs = [input_video, is_cropped_and_aligned],
                    outputs = [result_video],
                    run_on_click = False,
                    cache_examples = "lazy"
                )
            with gr.Column():
                # Place the pre-built result component in the right column.
                result_video.render()

    # Wire the button to inference; hidden from the public API surface.
    submit_btn.click(
        fn = infer,
        inputs = [input_video, is_cropped_and_aligned],
        outputs = [result_video],
        show_api=False
    )

demo.queue().launch(show_error=True, show_api=False)