File size: 3,238 Bytes
a7bb0d5
 
4c9c347
a7bb0d5
 
3c7b6f6
 
 
 
 
 
 
 
 
 
 
 
 
 
18ead1d
a7bb0d5
 
715cfb3
 
 
a7bb0d5
 
18ead1d
a7bb0d5
 
 
 
 
 
 
 
4c9c347
 
a7bb0d5
 
 
 
 
 
 
 
 
 
 
 
4f265a7
a7bb0d5
 
 
 
 
 
4f265a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a7bb0d5
 
 
 
18ead1d
a7bb0d5
 
 
 
 
 
18ead1d
4f265a7
a7bb0d5
bbe7bfb
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import gradio as gr
import subprocess
import shutil
import os

from huggingface_hub import snapshot_download

# Local directory that will hold the downloaded Go-with-the-Flow weights.
folder_name = "lora_models"

# Make sure the target directory exists (no-op when it is already there).
os.makedirs(folder_name, exist_ok=True)

# Pull a full snapshot of the model repository from the Hugging Face Hub
# into the local folder at startup.
snapshot_download(repo_id="Eyeline-Research/Go-with-the-Flow", local_dir=folder_name)

def process_video(video_path, prompt, num_steps):
    """Warp noise from the input video, then run cut-and-drag inference.

    Args:
        video_path: Path to the uploaded input video file.
        prompt: Text prompt guiding the generated video.
        num_steps: Number of diffusion inference steps.

    Returns:
        Path to the generated video file ("output.mp4").

    Raises:
        gr.Error: If either subprocess step exits with a non-zero status.
    """
    output_folder = "noise_warp_output_folder"
    output_video = "output.mp4"
    device = "cuda"

    # Start from a clean slate so artifacts of a previous run cannot
    # leak into this one.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)

    try:
        # Step 1: warp the noise from the input video.
        warp_command = [
            "python", "make_warped_noise.py", video_path,
            "--output_folder", output_folder,
        ]
        subprocess.run(warp_command, check=True)

        # Step 2: run inference on the warped-noise folder.
        inference_command = [
            "python", "cut_and_drag_inference.py", output_folder,
            "--prompt", prompt,
            "--output_mp4_path", output_video,
            "--device", device,
            "--num_inference_steps", str(num_steps),
        ]
        subprocess.run(inference_command, check=True)

        return output_video
    except subprocess.CalledProcessError as e:
        # Surface the failure in the Gradio UI instead of crashing the app;
        # chain the original error for server-side debugging.
        raise gr.Error(f"An error occurred: {e}") from e

# Gradio UI: a two-column layout with the inputs on the left and the
# generated result on the right, plus a badge row linking to the project.
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("# Go-With-The-Flow • Cut and Drag")
        # Static badge links (repo, paper, project page, duplicate/follow).
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href="https://github.com/Eyeline-Research/Go-with-the-Flow">
                <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
            </a> 
            <a href="https://arxiv.org/abs/2501.08331">
                <img src='https://img.shields.io/badge/ArXiv-Paper-red'>
            </a>
            <a href="https://eyeline-research.github.io/Go-with-the-Flow/">
                <img src='https://img.shields.io/badge/Project-Page-green'>
            </a>
            <a href="https://huggingface.co/spaces/fffiloni/Go-With-The-Flow?duplicate=true">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
            </a>
            <a href="https://huggingface.co/fffiloni">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/follow-me-on-HF-sm-dark.svg" alt="Follow me on HF">
            </a>
        </div>
        """)
        with gr.Row():
            with gr.Column():
                # User inputs: source video, text prompt, and step count.
                input_video = gr.Video(label="Input Video")
                prompt = gr.Textbox(label="Prompt")
                num_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=30, value=5, step=1)
                submit_btn = gr.Button("Submit")
            with gr.Column():
                # Output slot filled by process_video on submit.
                output_video = gr.Video(label="Result")

    # Wire the submit button to the processing pipeline.
    submit_btn.click(
        fn = process_video,
        inputs = [input_video, prompt, num_steps],
        outputs = [output_video]
    )

# Queue requests so long-running generations are serialized; hide the API tab.
demo.queue().launch(show_api=False)