Update app_t2v.py
app_t2v.py CHANGED (+10 -2)
@@ -17,12 +17,12 @@ def create_demo() -> gr.Blocks:
 def text_to_video(
     prompt: str,
     negative_prompt: str,
+    seed: int = 8,
     width: int = 512,
     height: int = 320,
     num_frames: int = 100,
     frame_rate: int = 20,
     num_inference_steps: int = 30,
-    seed: int = 8,
     progress: gr.Progress = gr.Progress(),
 ):
     generator = torch.Generator(device=device).manual_seed(seed)
@@ -80,6 +80,14 @@ def create_demo() -> gr.Blocks:
             lines=2,
         )
 
+        txt2vid_seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=2**32,
+            step=1,
+            value=8,
+        )
+
         txt2vid_generate = gr.Button(
             "Generate Video",
             variant="primary",
@@ -92,7 +100,7 @@ def create_demo() -> gr.Blocks:
 
     txt2vid_generate.click(
         fn=text_to_video,
-        inputs=[txt2vid_prompt, txt2vid_negative_prompt],
+        inputs=[txt2vid_prompt, txt2vid_negative_prompt, txt2vid_seed],
         outputs=[txt2vid_output, txt2vid_generated_cost],
     )
 
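Note on the reordering: gr.Blocks passes the components listed in `inputs` to `fn` positionally, so adding `txt2vid_seed` as the third input requires `seed` to become the third parameter of `text_to_video`, ahead of the other keyword defaults. Below is a minimal, self-contained sketch of the same slider-to-handler wiring; it is illustrative only (the textbox/output names and the trivial handler body are assumptions, not the Space's actual code).

    # sketch_seed_wiring.py - illustrative only, not the Space's app_t2v.py
    import gradio as gr

    def text_to_video(prompt: str, negative_prompt: str, seed: int = 8):
        # Placeholder body: the real handler seeds a torch.Generator with
        # manual_seed(seed) and returns a generated video; here we just echo.
        return f"would generate '{prompt}' with seed={seed}"

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt", lines=2)
        negative_prompt = gr.Textbox(label="Negative prompt", lines=2)
        # Same slider configuration as in the diff above.
        seed = gr.Slider(label="Seed", minimum=0, maximum=2**32, step=1, value=8)
        output = gr.Textbox(label="Output")
        generate = gr.Button("Generate Video", variant="primary")
        # inputs map positionally onto (prompt, negative_prompt, seed).
        generate.click(
            fn=text_to_video,
            inputs=[prompt, negative_prompt, seed],
            outputs=[output],
        )

    if __name__ == "__main__":
        demo.launch()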