Update pipeline.py
pipeline.py (+2 -1)
@@ -1540,7 +1540,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
         # if we had more then one prompt, we need to offset the video frames back by number of inference steps
         if len(prompt_embeds_list) > 1:
             # wrap the first n number of frames to the end of the video to fix the offseting from the context scheduler
-
+            offset = num_inference_steps * 2
+            video_tensor = torch.cat((video_tensor[:, :, offset:, :, :], video_tensor[:, :, :offset, :, :]), dim=2)
         video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
         for f_id, frame in enumerate(video[0]):
             frame.save(frame_format.format(start_id + f_id))
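To make the added step easy to verify in isolation, here is a minimal, self-contained sketch of the frame wrap (a circular shift along the frame axis). The 5-D (batch, channels, frames, height, width) layout, the value of num_inference_steps, and the dummy tensor are assumptions for illustration, not taken from the pipeline itself.

# A minimal sketch of the frame-wrapping step added above, on a dummy video tensor.
# The (batch, channels, frames, height, width) layout and the sample values are
# assumptions for illustration; they are not taken from the pipeline.
import torch

num_inference_steps = 4           # hypothetical value
offset = num_inference_steps * 2  # number of leading frames moved to the end

# 16 dummy frames so the wrap is easy to see in the printed output.
video_tensor = torch.arange(16, dtype=torch.float32).reshape(1, 1, 16, 1, 1)

# Same operation as the added line: split along the frame axis (dim=2) and swap the halves.
wrapped = torch.cat((video_tensor[:, :, offset:, :, :],
                     video_tensor[:, :, :offset, :, :]), dim=2)

# The concatenation is equivalent to a circular shift of the frame axis.
assert torch.equal(wrapped, torch.roll(video_tensor, shifts=-offset, dims=2))
print(wrapped[0, 0, :, 0, 0])  # frames now start at index `offset` and wrap around

torch.roll(video_tensor, shifts=-offset, dims=2) expresses the same wrap in a single call and could serve as a more compact alternative to the explicit torch.cat of the two slices.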