Anonymous committed
Commit 9a6f281
1 Parent(s): 186fae0
add 512
app.py CHANGED
@@ -17,22 +17,6 @@ from funcs import (
 )
 from utils.utils import instantiate_from_config
 
-ckpt_path_512 = "checkpoints/base_512_v1/model_512.ckpt"
-ckpt_dir_512 = "checkpoints/base_512_v1"
-os.makedirs(ckpt_dir_512, exist_ok=True)
-hf_hub_download(repo_id="MoonQiu/LongerCrafter", filename="model_512.ckpt", local_dir=ckpt_dir_512)
-
-# ckpt_path_1024 = "checkpoints/base_1024_v1/model.ckpt"
-# ckpt_dir_1024 = "checkpoints/base_1024_v1"
-# os.makedirs(ckpt_dir_1024, exist_ok=True)
-# hf_hub_download(repo_id="VideoCrafter/Text2Video-1024", filename="model.ckpt", local_dir=ckpt_dir_1024)
-
-# ckpt_path_256 = "checkpoints/base_256_v1/model_256.pth"
-# ckpt_dir_256 = "checkpoints/base_256_v1"
-# os.makedirs(ckpt_dir_256, exist_ok=True)
-# hf_hub_download(repo_id="MoonQiu/LongerCrafter", filename="model_256.pth", local_dir=ckpt_dir_256)
-
-
 def infer(prompt, output_size, seed, num_frames, ddim_steps, unconditional_guidance_scale, save_fps):
     window_size = 16
     window_stride = 4
@@ -40,11 +24,16 @@ def infer(prompt, output_size, seed, num_frames, ddim_steps, unconditional_guida
     if output_size == "320x512":
         width = 512
         height = 320
+        ckpt_dir_512 = "checkpoints/base_512_v1"
+        ckpt_path_512 = "checkpoints/base_512_v1/model_512.ckpt"
         config_512 = "configs/inference_t2v_tconv512_v1.0_freenoise.yaml"
         config_512 = OmegaConf.load(config_512)
         model_config_512 = config_512.pop("model", OmegaConf.create())
         model_512 = instantiate_from_config(model_config_512)
         model_512 = model_512.cuda()
+        if not os.path.exists(ckpt_path_512):
+            os.makedirs(ckpt_dir_512, exist_ok=True)
+            hf_hub_download(repo_id="MoonQiu/LongerCrafter", filename="model_512.ckpt", local_dir=ckpt_dir_512)
         model_512 = load_model_checkpoint(model_512, ckpt_path_512)
         model_512.eval()
         model = model_512
@@ -52,11 +41,16 @@ def infer(prompt, output_size, seed, num_frames, ddim_steps, unconditional_guida
     elif output_size == "576x1024":
         width = 1024
         height = 576
+        ckpt_dir_1024 = "checkpoints/base_1024_v1"
+        ckpt_path_1024 = "checkpoints/base_1024_v1/model.ckpt"
         config_1024 = "configs/inference_t2v_1024_v1.0_freenoise.yaml"
         config_1024 = OmegaConf.load(config_1024)
         model_config_1024 = config_1024.pop("model", OmegaConf.create())
         model_1024 = instantiate_from_config(model_config_1024)
         model_1024 = model_1024.cuda()
+        if not os.path.exists(ckpt_path_1024):
+            os.makedirs(ckpt_dir_1024, exist_ok=True)
+            hf_hub_download(repo_id="VideoCrafter/Text2Video-1024", filename="model.ckpt", local_dir=ckpt_dir_1024)
         model_1024 = load_model_checkpoint(model_1024, ckpt_path_1024)
         model_1024.eval()
         model = model_1024
@@ -64,11 +58,16 @@ def infer(prompt, output_size, seed, num_frames, ddim_steps, unconditional_guida
     # elif output_size == "256x256":
     #     width = 256
     #     height = 256
+    #     ckpt_dir_256 = "checkpoints/base_256_v1"
+    #     ckpt_path_256 = "checkpoints/base_256_v1/model_256.pth"
     #     config_256 = "configs/inference_t2v_tconv256_v1.0_freenoise.yaml"
     #     config_256 = OmegaConf.load(config_256)
     #     model_config_256 = config_256.pop("model", OmegaConf.create())
     #     model_256 = instantiate_from_config(model_config_256)
     #     model_256 = model_256.cuda()
+    #     if not os.path.exists(ckpt_path_256):
+    #         os.makedirs(ckpt_dir_256, exist_ok=True)
+    #         hf_hub_download(repo_id="MoonQiu/LongerCrafter", filename="model_256.pth", local_dir=ckpt_dir_256)
    #     model_256 = load_model_checkpoint(model_256, ckpt_path_256)
     #     model_256.eval()
     #     model = model_256
@@ -270,8 +269,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Accordion('FreeNoise Parameters (feel free to adjust these parameters based on your prompt): ', open=False):
             with gr.Row():
-
-                output_size = gr.Dropdown(["320x512", "576x1024"], value="320x512", label="Output Size", info="576x1024 will cost around 900s")
+                output_size = gr.Dropdown(["320x512", "576x1024"], value="320x512", label="Output Size", info="around 350s for 320x512, around 900s for 576x1024")
             with gr.Row():
                 num_frames = gr.Slider(label='Frames (a multiple of 4)',
                                        minimum=16,
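This commit stops downloading the 512 checkpoint at import time and instead fetches each checkpoint lazily inside the branch of infer() that needs it, guarded by an os.path.exists check so repeated runs reuse the file already on disk. Below is a minimal sketch of that lazy-download pattern, assuming huggingface_hub is installed; ensure_checkpoint is a hypothetical helper that does not exist in app.py, while the repo_id, filename, and checkpoint directory values are taken directly from the diff.

import os

from huggingface_hub import hf_hub_download


def ensure_checkpoint(ckpt_dir, filename, repo_id):
    # Hypothetical helper: fetch the checkpoint only if it is not already on disk.
    ckpt_path = os.path.join(ckpt_dir, filename)
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_dir, exist_ok=True)
        # hf_hub_download saves the file under local_dir, i.e. at ckpt_path.
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=ckpt_dir)
    return ckpt_path


if __name__ == "__main__":
    # Mirrors the 320x512 branch of the diff.
    ckpt_path_512 = ensure_checkpoint(
        ckpt_dir="checkpoints/base_512_v1",
        filename="model_512.ckpt",
        repo_id="MoonQiu/LongerCrafter",
    )
    print(ckpt_path_512)

Downloading on first use keeps Space startup light and means the 576x1024 checkpoint from VideoCrafter/Text2Video-1024 is only pulled when that output size is actually requested.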