smoothieAI committed
Commit ad2c80d · verified · 1 Parent(s): 774e71b

Update pipeline.py

Files changed (1):
  1. pipeline.py +4 -6
pipeline.py CHANGED
@@ -519,7 +519,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
 
     # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
     def prepare_latents(
-        self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
+        self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None, smooth_weight=0.5
     ):
         shape = (
             batch_size,
@@ -573,10 +573,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
                     weight = (smooth_steps - abs(s)) / smooth_steps
                     blended_latent += latents[:, :, frame_index] * weight
                 latents[:, :, i] = blended_latent / (2 * smooth_steps)
-            # just for testing make all frames have the same latent noise as frame 1
-            single_latent = randn_tensor((batch_size, num_channels_latents, 1, height // self.vae_scale_factor, width // self.vae_scale_factor), generator=generator, device=device, dtype=dtype)
-            latents = single_latent.repeat(1, 1, num_frames, 1, 1)
-
+
+            latents = torch.lerp(randn_tensor(shape, generator=generator, device=device, dtype=dtype),latents, smooth_weight)
         else:
             latents = latents.to(device)
 
@@ -783,7 +781,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
         latents = latents.to(device)
         return latents, init_latents
 
-
+
     @torch.no_grad()
     # @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
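
Note on the change: the old debug path drew one noise tensor and repeated it across every frame; the new path keeps the per-frame smoothed latents and blends them with fresh noise of the full latent shape via torch.lerp, weighted by the new smooth_weight argument (default 0.5). Below is a minimal sketch of just that blending step, using plain torch.randn in place of diffusers' randn_tensor helper and made-up latent dimensions; it is an illustration of the lerp, not the pipeline code itself.

import torch

# Hypothetical latent dimensions, chosen only for illustration.
batch_size, num_channels_latents, num_frames = 1, 4, 16
height, width = 64, 64  # latent-space size, i.e. pixel size // vae_scale_factor

shape = (batch_size, num_channels_latents, num_frames, height, width)
generator = torch.Generator().manual_seed(0)

# Stand-in for the temporally smoothed latents produced by the loop in the diff.
latents = torch.randn(shape, generator=generator)

# Fresh noise of the same shape; the pipeline uses diffusers' randn_tensor here,
# torch.randn is used only to keep this sketch self-contained.
fresh_noise = torch.randn(shape, generator=generator)

# torch.lerp(start, end, weight) = start + weight * (end - start):
# weight 0.0 -> pure fresh noise, 1.0 -> pure smoothed latents.
smooth_weight = 0.5
blended = torch.lerp(fresh_noise, latents, smooth_weight)

print(blended.shape)  # torch.Size([1, 4, 16, 64, 64])

With smooth_weight=0.0 the result is pure fresh noise and with smooth_weight=1.0 only the smoothed latents survive, so the default of 0.5 presumably trades per-frame variation against temporal smoothness rather than forcing every frame to share identical noise as the removed test code did.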