Commit 60b82a2 · Parent(s): cea9a36
Update app.py

app.py CHANGED
@@ -15,6 +15,8 @@ from lvdm.utils.common_utils import str2bool
 from lvdm.utils.dist_utils import setup_dist, gather_data
 from lvdm.utils.saving_utils import npz_to_video_grid, npz_to_imgsheet_5d
 from utils import load_model, get_conditions, make_model_input_shape, torch_to_np
+from lvdm.models.modules.lora import change_lora
+
 from huggingface_hub import hf_hub_download
 
 config_path = "model_config.yaml"
@@ -129,7 +131,17 @@ def get_video(prompt, seed, ddim_steps):
                                 )
     return save_results(samples)
 
+def get_video_lora(prompt, seed, ddim_steps):
+    seed_everything(seed)
+    change_lora(model, inject_lora=True, lora_scale=1.0, lora_path=filename_list[2])
+    samples = sample_text2video(model, prompt, n_samples=1, batch_size=1,
+                                sampler=ddim_sampler, ddim_steps=ddim_steps
+                                )
+    return save_results(samples)
+
+
 from gradio_t2v import create_demo as create_demo_basic
+from gradio_videolora import create_demo as create_demo_videolora
 
 DESCRIPTION = '# [Latent Video Diffusion Models](https://github.com/VideoCrafter/VideoCrafter)'
 DESCRIPTION += '\n<p>🤗🤗🤗 VideoCrafter is an open-source video generation and editing toolbox for crafting video content. This model can only be used for non-commercial purposes. To learn more about the model, take a look at the <a href="https://github.com/VideoCrafter/VideoCrafter" style="text-decoration: underline;" target="_blank">model card</a>.</p>'
@@ -139,6 +151,8 @@ with gr.Blocks(css='style.css') as demo:
     with gr.Tabs():
         with gr.TabItem('Basic Text2Video'):
             create_demo_basic(get_video)
+        with gr.TabItem('VideoLoRA'):
+            create_demo_videolora(get_video_lora)
 
 demo.queue(api_open=False).launch()
 
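The new 'VideoLoRA' tab relies on create_demo from gradio_videolora, a module that is not part of this commit. Below is a minimal sketch of what such a create_demo could look like, assuming it mirrors a basic Gradio text-to-video tab; the component labels and default values are illustrative assumptions, and only the callback signature (prompt, seed, ddim_steps) and its return value (the output of save_results) are taken from the diff above.

import gradio as gr

def create_demo(get_video_fn):
    # Sketch of a hypothetical gradio_videolora.create_demo. It is meant to be called
    # inside the `with gr.TabItem('VideoLoRA'):` block in app.py, so it only creates
    # components; app.py owns the surrounding gr.Blocks.
    prompt = gr.Textbox(label="Prompt", value="A corgi running on the beach")
    seed = gr.Slider(label="Seed", minimum=0, maximum=10000, step=1, value=1000)
    ddim_steps = gr.Slider(label="DDIM steps", minimum=10, maximum=100, step=1, value=50)
    run_button = gr.Button("Generate")
    # Displays the saved video returned by the callback (save_results(samples) in app.py).
    result = gr.Video(label="Result")
    # Wire the button to the callback passed in from app.py (here, get_video_lora).
    run_button.click(fn=get_video_fn,
                     inputs=[prompt, seed, ddim_steps],
                     outputs=result)

In app.py the commit invokes this as create_demo_videolora(get_video_lora), so clicking Generate runs get_video_lora, which injects the LoRA weights with change_lora(model, inject_lora=True, ...) before sampling.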