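"""Unofficial Gradio demo for "FLATTEN: optical FLow-guided ATTENtion for
consistent text-to-video editing". This file only builds the web UI; the
actual editing is performed by `inference_utils.inference`."""
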
import spaces
import gradio as gr
from inference_utils import inference


# Wrapper passed to Gradio; Spaces allocates a GPU for up to 120 seconds per call.
@spaces.GPU(duration=120)
def send_to_model(source_video, prompt, neg_prompt, guidance_scale, video_length, old_qk):
    return inference(prompt=prompt, neg_prompt=neg_prompt, guidance_scale=guidance_scale, video_length=video_length, video_path=source_video, old_qk=old_qk)


if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.HTML(
            """
            <h1 style="text-align: center; font-size: 32px; font-family: 'Times New Roman', Times, serif;">
                FLATTEN: optical FLow-guided ATTENtion for consistent text-to-video editing
            </h1>
            <p style="text-align: center; font-size: 20px; font-family: 'Times New Roman', Times, serif;">
                <a style="text-align: center; display:inline-block"
                   href="https://flatten-video-editing.github.io/">
                    <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/paper-page-sm.svg#center"
                         alt="Paper Page">
                </a>
                <a style="text-align: center; display:inline-block" href="https://huggingface.co/spaces/sky24h/FLATTEN-unofficial?duplicate=true">
                    <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg#center" alt="Duplicate Space">
                </a>
            </p>
            """
        )
        gr.Interface(
            fn=send_to_model,
            inputs=[
                gr.Video(value=None, label="Source Video"),
                gr.Textbox(value="", label="Prompt"),
                gr.Textbox(value="", label="Negative Prompt"),
                gr.Slider(
                    value=15,
                    minimum=10,
                    maximum=30,
                    step=1,
                    label="Guidance Scale",
                    info="Strength of the text guidance (classifier-free guidance scale).",
                ),
                gr.Textbox(
                    value=16,
                    label="Video Length",
                    info="Number of frames to edit; at most 16 in the online demo to avoid a timeout. You can run the model locally to process longer videos.",
                ),
                gr.Dropdown(value=0, choices=[0, 1], label="old_qk", info="Passed to the model as the old_qk option; select 0 or 1."),
            ],
            outputs=[gr.Video(label="Output", autoplay=True)],
            allow_flagging="never",
            description="This is an unofficial demo for the paper 'FLATTEN: optical FLow-guided ATTENtion for consistent text-to-video editing'.",
            examples=[
                ["./data/puff.mp4", "A Tiger, high quality", "a cat with big eyes, deformed", 20, 16, 0],
                ["./data/background.mp4", "pointillism painting, detailed", "", 25, 16, 1],
                ["./data/trucks-race.mp4", "Wooden trucks drive on a racetrack.", "", 15, 16, 1],
            ],
            cache_examples=True,
        )
    demo.queue(max_size=10).launch()
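
# To run this demo locally: `python app.py`. Videos longer than the 16-frame
# online limit can be processed by running the model on your own GPU.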