linoyts (HF Staff) committed
Commit fad21f9 · verified · 1 Parent(s): 806b2b0

Update app.py

Files changed (1):
  1. app.py (+34 -16)
app.py CHANGED
@@ -23,36 +23,49 @@ def round_to_nearest_resolution_acceptable_by_vae(height, width):
     height = height - (height % pipe.vae_temporal_compression_ratio)
     width = width - (width % pipe.vae_temporal_compression_ratio)
     return height, width
+
+def change_mode_to_text():
+    return gr.update(value="text-to-video")
+
+def change_mode_to_image():
+    return gr.update(value="image-to-video")
+
+def change_mode_to_video():
+    return gr.update(value="video-to-video")
 
 @spaces.GPU
 def generate(prompt,
              negative_prompt,
              image,
              video,
+             mode,
              steps,
              num_frames,
              seed,
              randomize_seed,
-             t2v, improve_texture=False, progress=gr.Progress(track_tqdm=True)):
+             improve_texture=False, progress=gr.Progress(track_tqdm=True)):
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     # Part 1. Generate video at smaller resolution
     # Text-only conditioning is also supported without the need to pass `conditions`
-    expected_height, expected_width = 768, 1152
+    expected_height, expected_width = 768, 1152 #todo make configurable
     downscale_factor = 2 / 3
     downscaled_height, downscaled_width = int(expected_height * downscale_factor), int(expected_width * downscale_factor)
     downscaled_height, downscaled_width = round_to_nearest_resolution_acceptable_by_vae(downscaled_height, downscaled_width)
 
-    condition = image is not None or video is not None
-    if video:
-        frames_to_use = 21
+    if mode == "text-to-video" and video is not None:
+        frames_to_use = 21 #todo make configurable
         video = load_video(video)[:frames_to_use]
-    else:
+        condition = True
+    elif mode == "image-to-video" and image is not None:
         video = [image]
+        condition = True
+    else:
+        condition=False
 
-    if condition and (not t2v):
+    if condition:
         condition1 = LTXVideoCondition(video=video, frame_index=0)
         latents = pipe(
             conditions=condition1,
@@ -158,20 +171,19 @@ function refresh() {
 with gr.Blocks(css=css, theme=gr.themes.Ocean()) as demo:
 
     gr.Markdown("# LTX Video 0.9.7 Distilled")
-
+    mode = gr.State(value="text-to-video")
     with gr.Row():
         with gr.Column():
             with gr.Group():
-                with gr.Tab("text-to-video"):
+                with gr.Tab("text-to-video") as text_tab:
                     image = gr.Image(label="", visible=False)
                     #prompt = gr.Textbox(label="prompt")
-                with gr.Tab("image-to-video"):
+                with gr.Tab("image-to-video") as image_tab:
                     image = gr.Image(label="")
                     #prompt = gr.Textbox(label="prompt")
-                with gr.Tab("video-to-video"):
+                with gr.Tab("video-to-video") as video_tab:
                     video = gr.Video(label="")
                     prompt = gr.Textbox(label="prompt")
-            t2v = gr.Checkbox(label="t2v", value=False)
             run_button = gr.Button()
         with gr.Column():
             output = gr.Video(interactive=False)
@@ -184,20 +196,26 @@ with gr.Blocks(css=css, theme=gr.themes.Ocean()) as demo:
             randomize_seed = gr.Checkbox(label="randomize seed")
         with gr.Row():
             steps = gr.Slider(label="Steps", minimum=1, maximum=30, value=8, step=1)
-            num_frames = gr.Slider(label="# frames", minimum=1, maximum=30, value=8, step=1)
+            num_frames = gr.Slider(label="# frames", minimum=1, maximum=161, value=96, step=1)
 
 
-
+    text_tab.select(fn=change_mode_to_text, inputs=[], outputs=[mode])
+    image_tab.select(fn=change_mode_to_image, inputs=[], outputs=[mode])
+    video_tab.select(fn=change_mode_to_video, inputs=[], outputs=[mode])
+
     run_button.click(fn=generate,
                      inputs=[prompt,
                              negative_prompt,
                              image,
-                             video,
+                             video,
+                             mode,
                              steps,
                              num_frames,
                              seed,
-                             randomize_seed, t2v],
+                             randomize_seed],
                      outputs=[output])
+
+
 
 
 demo.launch()
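
For context, a minimal, self-contained sketch of the pattern this commit introduces: the t2v checkbox is replaced by a hidden gr.State that tracks which tab is active, each gr.Tab's select event writes its mode string into that state, and the click handler receives the state alongside the other inputs. Component names mirror the diff; the echo() handler is a hypothetical stand-in for the Space's generate() function, and exact behavior may vary between Gradio versions.

import gradio as gr

def echo(prompt, mode):
    # Hypothetical stand-in for generate(): just report which branch would run.
    return f"mode={mode!r}, prompt={prompt!r}"

with gr.Blocks() as demo:
    # Hidden state holding the currently selected mode, defaulting to text-to-video.
    mode = gr.State(value="text-to-video")

    with gr.Tab("text-to-video") as text_tab:
        gr.Markdown("Text conditioning only.")
    with gr.Tab("image-to-video") as image_tab:
        gr.Markdown("Image conditioning.")
    with gr.Tab("video-to-video") as video_tab:
        gr.Markdown("Video conditioning.")

    prompt = gr.Textbox(label="prompt")
    run_button = gr.Button("Run")
    output = gr.Textbox(label="output")

    # Selecting a tab overwrites the shared mode state with the matching string.
    text_tab.select(fn=lambda: "text-to-video", inputs=[], outputs=[mode])
    image_tab.select(fn=lambda: "image-to-video", inputs=[], outputs=[mode])
    video_tab.select(fn=lambda: "video-to-video", inputs=[], outputs=[mode])

    # The state is passed to the handler like any other input component.
    run_button.click(fn=echo, inputs=[prompt, mode], outputs=[output])

demo.launch()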