Update pipeline.py
Browse files — pipeline.py (+5 −5)
pipeline.py
CHANGED
@@ -920,7 +920,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
920 |
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
921 |
"""
|
922 |
|
923 |
-        if controlnet != None:
|
924 |
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
925 |
|
926 |
# align format for control guidance
|
@@ -957,7 +957,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
957 |
|
958 |
device = self._execution_device
|
959 |
|
960 |
-        if controlnet != None:
|
961 |
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
962 |
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
963 |
|
@@ -1003,7 +1003,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
1003 |
if do_classifier_free_guidance:
|
1004 |
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
1005 |
|
1006 |
-        if controlnet != None:
|
1007 |
if isinstance(controlnet, ControlNetModel):
|
1008 |
conditioning_frames = self.prepare_image(
|
1009 |
image=conditioning_frames,
|
@@ -1125,7 +1125,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
1125 |
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
1126 |
|
1127 |
# 7.1 Create tensor stating which controlnets to keep
|
1128 |
-        if controlnet != None:
|
1129 |
controlnet_keep = []
|
1130 |
for i in range(len(timesteps)):
|
1131 |
keeps = [
|
@@ -1192,7 +1192,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
1192 |
|
1193 |
|
1194 |
|
1195 |
-        if controlnet != None:
|
1196 |
if guess_mode and self.do_classifier_free_guidance:
|
1197 |
# Infer ControlNet only for the conditional batch.
|
1198 |
control_model_input = latents
|
|
|
920 |
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
921 |
"""
|
922 |
|
923 |
+        if self.controlnet != None:
|
924 |
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
925 |
|
926 |
# align format for control guidance
|
|
|
957 |
|
958 |
device = self._execution_device
|
959 |
|
960 |
+        if self.controlnet != None:
|
961 |
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
962 |
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
963 |
|
|
|
1003 |
if do_classifier_free_guidance:
|
1004 |
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
1005 |
|
1006 |
+        if self.controlnet != None:
|
1007 |
if isinstance(controlnet, ControlNetModel):
|
1008 |
conditioning_frames = self.prepare_image(
|
1009 |
image=conditioning_frames,
|
|
|
1125 |
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
1126 |
|
1127 |
# 7.1 Create tensor stating which controlnets to keep
|
1128 |
+        if self.controlnet != None:
|
1129 |
controlnet_keep = []
|
1130 |
for i in range(len(timesteps)):
|
1131 |
keeps = [
|
|
|
1192 |
|
1193 |
|
1194 |
|
1195 |
+        if self.controlnet != None:
|
1196 |
if guess_mode and self.do_classifier_free_guidance:
|
1197 |
# Infer ControlNet only for the conditional batch.
|
1198 |
control_model_input = latents
|