smoothieAI committed on
Commit 1698ec6 · verified · 1 Parent(s): 699de14

Update pipeline.py

Files changed (1)
  1. pipeline.py +5 -0
pipeline.py CHANGED
@@ -1539,10 +1539,15 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
                 start_id = batch * output_batch_size
                 end_id = min((batch + 1) * output_batch_size, num_frames)
                 video_tensor = self.decode_latents(latents[:, :, start_id:end_id, :, :])
+                # if we had more than one prompt, we need to offset the video frames back by the number of inference steps
+                if len(prompt_embeds_list) > 1:
+                    # wrap the first n frames to the end of the video to fix the offsetting from the context scheduler
+                    video_tensor = torch.cat((video_tensor[:, :, num_inference_steps:, :, :], video_tensor[:, :, :num_inference_steps, :, :]), dim=2)
                 video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
                 for f_id, frame in enumerate(video[0]):
                     frame.save(frame_format.format(start_id + f_id))
             return output_path
+
 
         # Post-processing
         video_tensor = self.decode_latents(latents)
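
For readers following the change: the added torch.cat call rotates the frame axis so the first num_inference_steps frames wrap around to the end of the video. A minimal sketch of that operation, assuming the decoded video tensor is laid out as [batch, channels, frames, height, width] as the slicing in the diff suggests (the shapes below are illustrative only, not taken from the pipeline):

import torch

# Illustrative shapes only: [batch, channels, frames, height, width]
video_tensor = torch.arange(8).view(1, 1, 8, 1, 1)   # 8 dummy frames labelled 0..7
num_inference_steps = 3

# The operation added in the diff: move the first `num_inference_steps`
# frames to the end of the frame axis (dim=2).
wrapped = torch.cat(
    (video_tensor[:, :, num_inference_steps:, :, :],
     video_tensor[:, :, :num_inference_steps, :, :]),
    dim=2,
)

# Same result as a single roll along the frame axis.
rolled = torch.roll(video_tensor, shifts=-num_inference_steps, dims=2)
assert torch.equal(wrapped, rolled)
print(wrapped.flatten().tolist())   # [3, 4, 5, 6, 7, 0, 1, 2]

torch.roll would express the same rotation in one call; the explicit torch.cat used in the commit keeps the wrap-around of the leading frames easy to see.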