This PR adds a "Randomize seed" option: when enabled, a fresh random seed is drawn for each video generation instead of using the fixed seed slider value.

#1
Files changed (1) hide show
  1. demo.py +15 -11
demo.py CHANGED
@@ -132,8 +132,10 @@ def prepare_image(image, vae, transform_video, device, dtype=torch.float16):
132
 
133
 
134
  @spaces.GPU
135
- def gen_video(input_image, prompt, negative_prompt, diffusion_step, height, width, scfg_scale, use_dctinit, dct_coefficients, noise_level, motion_bucket_id, seed):
136
 
 
 
137
  torch.manual_seed(seed)
138
 
139
  scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_path,
@@ -248,6 +250,7 @@ with gr.Blocks() as demo:
248
  sample_step_slider = gr.Slider(label="Sampling steps", value=50, minimum=10, maximum=250, step=1)
249
 
250
  with gr.Row():
 
251
  seed_textbox = gr.Slider(label="Seed", value=100, minimum=1, maximum=int(1e8), step=1, interactive=True)
252
  # seed_textbox = gr.Textbox(label="Seed", value=100)
253
  # seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
@@ -270,22 +273,22 @@ with gr.Blocks() as demo:
270
  input_image_path.submit(fn=update_and_resize_image, inputs=[input_image_path, height, width], outputs=[input_image])
271
 
272
  EXAMPLES = [
273
- ["./example/red_panda_eating_bamboo/0.jpg", "red panda eating bamboo" , "low quality", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
274
- ["./example/fireworks/0.jpg", "fireworks" , "low quality", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
275
- ["./example/flowers_swaying/0.jpg", "flowers swaying" , "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
276
- ["./example/girl_walking_on_the_beach/0.jpg", "girl walking on the beach" , "low quality, background changing", 50, 320, 512, 7.5, True, 0.25, 995, 10, 49494220],
277
- ["./example/house_rotating/0.jpg", "house rotating" , "low quality", 50, 320, 512, 7.5, True, 0.23, 985, 10, 46640174],
278
- ["./example/people_runing/0.jpg", "people runing" , "low quality, background changing", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
279
- ["./example/shark_swimming/0.jpg", "shark swimming" , "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 32947978],
280
- ["./example/car_moving/0.jpg", "car moving" , "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 75469653],
281
- ["./example/windmill_turning/0.jpg", "windmill turning" , "background changing", 50, 320, 512, 7.5, True, 0.21, 975, 10, 89378613],
282
  ]
283
 
284
 
285
  examples = gr.Examples(
286
  examples = EXAMPLES,
287
  fn = gen_video,
288
- inputs=[input_image, prompt_textbox, negative_prompt_textbox, sample_step_slider, height, width, txt_cfg_scale, use_dctinit, dct_coefficients, noise_level, motion_bucket_id, seed_textbox],
289
  outputs=[result_video],
290
  cache_examples=True,
291
  # cache_examples="lazy",
@@ -305,6 +308,7 @@ with gr.Blocks() as demo:
305
  dct_coefficients,
306
  noise_level,
307
  motion_bucket_id,
 
308
  seed_textbox,
309
  ],
310
  outputs=[result_video]
 
132
 
133
 
134
  @spaces.GPU
135
+ def gen_video(input_image, prompt, negative_prompt, diffusion_step, height, width, scfg_scale, use_dctinit, dct_coefficients, noise_level, motion_bucket_id, randomize_seed, seed):
136
 
137
+ if randomize_seed:
138
+ seed = random.randint(1, int(1e8))
139
  torch.manual_seed(seed)
140
 
141
  scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_path,
 
250
  sample_step_slider = gr.Slider(label="Sampling steps", value=50, minimum=10, maximum=250, step=1)
251
 
252
  with gr.Row():
253
+ randomize_seed_checkbox = gr.Checkbox(label = "Randomize seed", value = True, info = "If checked, result is always different")
254
  seed_textbox = gr.Slider(label="Seed", value=100, minimum=1, maximum=int(1e8), step=1, interactive=True)
255
  # seed_textbox = gr.Textbox(label="Seed", value=100)
256
  # seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
 
273
  input_image_path.submit(fn=update_and_resize_image, inputs=[input_image_path, height, width], outputs=[input_image])
274
 
275
  EXAMPLES = [
276
+ ["./example/red_panda_eating_bamboo/0.jpg", "red panda eating bamboo" , "low quality", 50, 320, 512, 7.5, True, 0.23, 975, 10, False, 100],
277
+ ["./example/fireworks/0.jpg", "fireworks" , "low quality", 50, 320, 512, 7.5, True, 0.23, 975, 10, False, 100],
278
+ ["./example/flowers_swaying/0.jpg", "flowers swaying" , "", 50, 320, 512, 7.5, True, 0.23, 975, 10, False, 100],
279
+ ["./example/girl_walking_on_the_beach/0.jpg", "girl walking on the beach" , "low quality, background changing", 50, 320, 512, 7.5, True, 0.25, 995, 10, False, 49494220],
280
+ ["./example/house_rotating/0.jpg", "house rotating" , "low quality", 50, 320, 512, 7.5, True, 0.23, 985, 10, False, 46640174],
281
+ ["./example/people_runing/0.jpg", "people runing" , "low quality, background changing", 50, 320, 512, 7.5, True, 0.23, 975, 10, False, 100],
282
+ ["./example/shark_swimming/0.jpg", "shark swimming" , "", 50, 320, 512, 7.5, True, 0.23, 975, 10, False, 32947978],
283
+ ["./example/car_moving/0.jpg", "car moving" , "", 50, 320, 512, 7.5, True, 0.23, 975, 10, False, 75469653],
284
+ ["./example/windmill_turning/0.jpg", "windmill turning" , "background changing", 50, 320, 512, 7.5, True, 0.21, 975, 10, False, 89378613],
285
  ]
286
 
287
 
288
  examples = gr.Examples(
289
  examples = EXAMPLES,
290
  fn = gen_video,
291
+ inputs=[input_image, prompt_textbox, negative_prompt_textbox, sample_step_slider, height, width, txt_cfg_scale, use_dctinit, dct_coefficients, noise_level, motion_bucket_id, randomize_seed_checkbox, seed_textbox],
292
  outputs=[result_video],
293
  cache_examples=True,
294
  # cache_examples="lazy",
 
308
  dct_coefficients,
309
  noise_level,
310
  motion_bucket_id,
311
+ randomize_seed_checkbox,
312
  seed_textbox,
313
  ],
314
  outputs=[result_video]