Spaces: Running on Zero

Create app.py

app.py ADDED
@@ -0,0 +1,137 @@
import torch
from diffusers import AutoencoderKLWan, WanPipeline
from diffusers.utils import export_to_video
import gradio as gr
import tempfile
import os
import spaces
from huggingface_hub import hf_hub_download

# --- Global Model Loading (runs once when the script starts) ---
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"


print(f"Loading VAE for {MODEL_ID}...")
# Use float32 for the VAE for numerical stability, as recommended in some cases
vae = AutoencoderKLWan.from_pretrained(
    MODEL_ID,
    subfolder="vae",
    torch_dtype=torch.float32
)
print(f"Loading pipeline {MODEL_ID}...")
# Use bfloat16 for the main pipeline for memory efficiency and speed
pipe = WanPipeline.from_pretrained(
    MODEL_ID,
    vae=vae,
    torch_dtype=torch.bfloat16
)
print("Moving pipeline to CUDA...")
pipe.to("cuda")
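
# Note (editor's assumption, not part of the original commit): on ZeroGPU
# Spaces the `spaces` package patches CUDA initialization, so the .to("cuda")
# above is safe at import time; a GPU is actually attached only while a
# @spaces.GPU-decorated function is running.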

# Download and apply the CausVid LoRA
causvid_path = hf_hub_download(
    repo_id="Kijai/WanVideo_comfy",
    filename="Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
)
pipe.load_lora_weights(causvid_path)
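
# Note (editor's assumption): CausVid is a distillation LoRA intended for
# few-step sampling; if results look degraded, lowering its strength is one
# knob to try, e.g.:
#     pipe.set_adapters(pipe.get_active_adapters(), adapter_weights=[0.75])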

# --- Gradio Interface Function ---
@spaces.GPU
def generate_video(prompt, negative_prompt, height, width, num_frames, guidance_scale, fps):

    print("Starting video generation...")
    print(f"  Prompt: {prompt}")
    print(f"  Negative Prompt: {negative_prompt if negative_prompt else 'None'}")
    print(f"  Height: {height}, Width: {width}")
    print(f"  Num Frames: {num_frames}, FPS: {fps}")
    print(f"  Guidance Scale: {guidance_scale}")

    # Ensure height and width are multiples of 8 (a common VAE requirement)
    height = (int(height) // 8) * 8
    width = (int(width) // 8) * 8
    num_frames = int(num_frames)
    fps = int(fps)
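
    # Note (editor's assumption, not in the original file): Wan checkpoints are
    # usually sampled with num_frames of the form 4k + 1 (the diffusers default
    # is 81), so snapping the slider value is a defensive option:
    #     num_frames = max(5, (num_frames // 4) * 4 + 1)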

    with torch.inference_mode():  # No gradients needed at inference; conserves memory
        output_frames_list = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_frames=num_frames,
            guidance_scale=float(guidance_scale),
            # num_inference_steps=25  # Could be exposed as a UI control if needed
        ).frames
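
    # Reproducibility (editor's note, not part of the original commit): the
    # pipeline call above also accepts a seeded generator for deterministic
    # outputs, e.g.
    #     generator=torch.Generator(device="cuda").manual_seed(42)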

    # `frames` may be a numpy array, so use length checks rather than plain truthiness
    if output_frames_list is None or len(output_frames_list) == 0 or len(output_frames_list[0]) == 0:
        raise gr.Error("Model returned empty frames. Check parameters or try a different prompt.")

    output_frames = output_frames_list[0]  # Frames of the first (and only) video in the batch

    # delete=False keeps the file on disk after the context manager exits,
    # so Gradio can serve it from this path
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name

    export_to_video(output_frames, video_path, fps=fps)
    print(f"Video successfully generated and saved to {video_path}")

    return video_path


# --- Gradio UI Definition ---
default_prompt = "A cat walks on the grass, realistic"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"

with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Text-to-Video with Wan 2.1 (14B)
    Powered by `diffusers` and `{MODEL_ID}`.
    The model is loaded into memory when the app starts, which may take a few minutes.
    Ensure you have a GPU with sufficient VRAM (roughly 24GB+ for these default settings).
    """)

    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt, lines=3)
            negative_prompt_input = gr.Textbox(
                label="Negative Prompt (Optional)",
                value=default_negative_prompt,
                lines=3
            )
            with gr.Row():
                height_input = gr.Slider(minimum=256, maximum=768, step=64, value=480, label="Height (multiple of 8)")
                width_input = gr.Slider(minimum=256, maximum=1024, step=64, value=832, label="Width (multiple of 8)")
            with gr.Row():
                num_frames_input = gr.Slider(minimum=16, maximum=100, step=1, value=25, label="Number of Frames")  # Modest default keeps the demo fast
                fps_input = gr.Slider(minimum=5, maximum=30, step=1, value=15, label="Output FPS")
            guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=5.0, label="Guidance Scale")

            generate_button = gr.Button("Generate Video", variant="primary")

        with gr.Column(scale=3):
            video_output = gr.Video(label="Generated Video")

    generate_button.click(
        fn=generate_video,
        inputs=[
            prompt_input,
            negative_prompt_input,
            height_input,
            width_input,
            num_frames_input,
            guidance_scale_input,
            fps_input
        ],
        outputs=video_output
    )

    gr.Examples(
        examples=[
            ["A panda eating bamboo in a lush forest, cinematic lighting", default_negative_prompt, 480, 832, 25, 5.0, 15],
            ["A majestic eagle soaring over snowy mountains", default_negative_prompt, 512, 768, 30, 7.0, 12],
            ["Timelapse of a flower blooming, vibrant colors", "static, ugly", 384, 640, 40, 6.0, 20],
            ["Astronaut walking on the moon, Earth in the background, highly detailed", default_negative_prompt, 480, 832, 20, 5.5, 10],
        ],
        inputs=[prompt_input, negative_prompt_input, height_input, width_input, num_frames_input, guidance_scale_input, fps_input],
        outputs=video_output,
        fn=generate_video,
        cache_examples=False  # Cached example videos can consume disk space quickly
    )

if __name__ == "__main__":
    # share=True creates a temporary public link when running outside Spaces (e.g. on Colab)
    demo.queue().launch(share=True, debug=True)
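
For reference, a Space built from this file also needs a requirements.txt next to app.py. A minimal sketch, inferred from the imports above (package set assumed, not part of this commit; transformers and accelerate are listed as typical diffusers pipeline dependencies, and the exact video-export backend depends on the diffusers release):

    torch
    diffusers
    transformers
    accelerate
    gradio
    spaces
    huggingface_hub
    imageio
    imageio-ffmpeg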