Update pipeline.py
Browse files- pipeline.py +3 -7
pipeline.py
CHANGED
@@ -972,9 +972,6 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
972 |
if context_group == 0:current_context_start = 0
|
973 |
else:current_context_start = context_group * (context_size - overlap)
|
974 |
|
975 |
-
# print current start, total frames, and context size, and if end frame is greater than total frames
|
976 |
-
print(f"Current context start: {current_context_start}, total frames: {num_frames}, context size: {context_size}, end frame: {current_context_start + context_size}")
|
977 |
-
|
978 |
# select the relevant context from the latents
|
979 |
current_context_latents = latents[:, :, current_context_start : current_context_start + context_size, :, :]
|
980 |
|
@@ -982,11 +979,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
982 |
|
983 |
# if context_start + context_size > num_frames: append the remaining frames from the start of the latents
|
984 |
if wrap_count > 0:
|
985 |
-
print(f"Appending {wrap_count} frames from the start of the latents")
|
986 |
current_context_latents = torch.cat([current_context_latents, latents[:, :, :wrap_count, :, :]], dim=2)
|
987 |
-
|
988 |
-
# print number of frames in the context
|
989 |
-
print(f"Number of frames in the context: {current_context_latents.shape[2]}")
|
990 |
|
991 |
# expand the latents if we are doing classifier free guidance
|
992 |
latent_model_input = torch.cat([current_context_latents] * 2) if do_classifier_free_guidance else current_context_latents
|
@@ -1012,6 +1005,9 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
1012 |
current_context_latents = current_context_latents[:, :, :-wrap_count, :, :]
|
1013 |
# remove the ending frames from noise_pred
|
1014 |
noise_pred = noise_pred[:, :, :-wrap_count, :, :]
|
|
|
|
|
|
|
1015 |
|
1016 |
# compute the previous noisy sample x_t -> x_t-1
|
1017 |
current_context_latents = self.scheduler.step(noise_pred, t, current_context_latents, **extra_step_kwargs).prev_sample
|
|
|
972 |
if context_group == 0:current_context_start = 0
|
973 |
else:current_context_start = context_group * (context_size - overlap)
|
974 |
|
|
|
|
|
|
|
975 |
# select the relevant context from the latents
|
976 |
current_context_latents = latents[:, :, current_context_start : current_context_start + context_size, :, :]
|
977 |
|
|
|
979 |
|
980 |
# if context_start + context_size > num_frames: append the remaining frames from the start of the latents
|
981 |
if wrap_count > 0:
|
|
|
982 |
current_context_latents = torch.cat([current_context_latents, latents[:, :, :wrap_count, :, :]], dim=2)
|
|
|
|
|
|
|
983 |
|
984 |
# expand the latents if we are doing classifier free guidance
|
985 |
latent_model_input = torch.cat([current_context_latents] * 2) if do_classifier_free_guidance else current_context_latents
|
|
|
1005 |
current_context_latents = current_context_latents[:, :, :-wrap_count, :, :]
|
1006 |
# remove the ending frames from noise_pred
|
1007 |
noise_pred = noise_pred[:, :, :-wrap_count, :, :]
|
1008 |
+
# print the shape of the current_context_latents and noise_pred
|
1009 |
+
print(f"current_context_latents shape: {current_context_latents.shape}")
|
1010 |
+
print(f"noise_pred shape: {noise_pred.shape}")
|
1011 |
|
1012 |
# compute the previous noisy sample x_t -> x_t-1
|
1013 |
current_context_latents = self.scheduler.step(noise_pred, t, current_context_latents, **extra_step_kwargs).prev_sample
|