Update pipeline.py
pipeline.py  (+4 -4)
@@ -1178,8 +1178,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
                 num_videos_per_prompt,
                 do_classifier_free_guidance,
                 negative_prompt,
-                prompt_embeds=
-                negative_prompt_embeds=
+                prompt_embeds=prompt_embeds[p].unsqueeze(0),
+                negative_prompt_embeds=negative_prompt_embeds[p].unsqueeze(0),
                 lora_scale=text_encoder_lora_scale,
                 clip_skip=clip_skip,
             )

@@ -1189,9 +1189,9 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
             # to avoid doing two forward passes
             if do_classifier_free_guidance:
                 # concatenate negative prompt embeddings with prompt embeddings on a new dimension after the first batch dimension
-
+                single_prompt_embeds = torch.stack([single_negative_prompt_embeds, single_prompt_embeds], dim=1)

-                prompt_embeds_list.append(
+                prompt_embeds_list.append(single_prompt_embeds)


             if ip_adapter_image is not None:
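The change appears to move prompt encoding into a per-prompt loop: each prompt's embedding is passed to encode_prompt as a single-element batch via prompt_embeds[p].unsqueeze(0), and under classifier-free guidance the negative and positive embeddings are stacked on a new dimension directly after the batch dimension (torch.stack(..., dim=1)) before being appended to prompt_embeds_list. Below is a minimal sketch of the resulting tensor shapes; the dimensions (3 prompts, 77 tokens, 768-dim embeddings), the loop, and the final torch.cat are illustrative assumptions, not part of the pipeline code itself.

# Illustrative sketch of the tensor shapes produced by the changed lines.
# Shapes are assumed (3 prompts, 77 tokens, 768-dim embeddings); only the
# unsqueeze/stack pattern mirrors the diff.
import torch

num_prompts, seq_len, embed_dim = 3, 77, 768
prompt_embeds = torch.randn(num_prompts, seq_len, embed_dim)
negative_prompt_embeds = torch.randn(num_prompts, seq_len, embed_dim)

prompt_embeds_list = []
for p in range(num_prompts):
    # prompt_embeds[p].unsqueeze(0): slice out one prompt and restore a
    # leading batch dimension of 1, as in the updated encode_prompt call.
    single_prompt_embeds = prompt_embeds[p].unsqueeze(0)                    # (1, 77, 768)
    single_negative_prompt_embeds = negative_prompt_embeds[p].unsqueeze(0)  # (1, 77, 768)

    # torch.stack(..., dim=1): place negative and positive embeddings on a
    # new dimension right after the batch dimension instead of concatenating
    # them along the batch dimension.
    single_prompt_embeds = torch.stack(
        [single_negative_prompt_embeds, single_prompt_embeds], dim=1
    )                                                                       # (1, 2, 77, 768)
    prompt_embeds_list.append(single_prompt_embeds)

print(torch.cat(prompt_embeds_list).shape)  # torch.Size([3, 2, 77, 768])

Stacking on dim=1 keeps each prompt's negative/positive pair grouped in its own dimension ((1, 2, seq_len, embed_dim) per entry) rather than doubling the batch dimension, so downstream code can recover the guidance pair per prompt by indexing that dimension instead of chunking the batch.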