Update pipeline.py
pipeline.py CHANGED (+4 -2)
@@ -852,8 +852,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
 
         image = image.to(device=device, dtype=dtype)
 
-        if do_classifier_free_guidance and not guess_mode:
-            image = torch.cat([image] * 2)
+        # if do_classifier_free_guidance and not guess_mode:
+        #     image = torch.cat([image] * 2)
 
         print("prepared control image_batch_size", image.shape)
         print("prepared control device", image.device)
@@ -1265,6 +1265,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
                 if self.controlnet != None:
 
                     current_context_conditioning_frames = conditioning_frames[current_context_indexes, :, :, :]
+                    current_context_conditioning_frames = torch.cat([current_context_conditioning_frames] * 2) if do_classifier_free_guidance else current_context_conditioning_frames
+
 
                     if guess_mode and self.do_classifier_free_guidance:
                         # Infer ControlNet only for the conditional batch.