Commit: Update pipeline.py
Files changed: 1 — pipeline.py (+4 lines, −4 lines)
@@ -1172,14 +1172,14 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap

Old version (lines 1172-1185):

1172          # print shape of prompt_embeds
1173          print(prompt_embeds.shape)
1174          for p in range(num_prompts):
1175  -           [removed line — content lost in page extraction; presumably the previous call opening]
1176                  prompt,
1177                  device,
1178                  num_videos_per_prompt,
1179                  do_classifier_free_guidance,
1180                  negative_prompt,
1181  -               prompt_embeds=[value lost in page extraction],
1182  -               negative_prompt_embeds=[value lost in page extraction],
1183                  lora_scale=text_encoder_lora_scale,
1184                  clip_skip=clip_skip,
1185              )
@@ -1189,7 +1189,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap

Old version (lines 1189-1195):

1189          # to avoid doing two forward passes
1190          if do_classifier_free_guidance:
1191              # concatenate negative prompt embeddings with prompt embeddings on a new dimension after the first batch dimension
1192  -           prompt_embeds = torch.stack([ [remainder of line lost in page extraction]
1193
1194          prompt_embeds_list.append(prompt_embeds)
1195
|
New version (lines 1172-1185):

1172          # print shape of prompt_embeds
1173          print(prompt_embeds.shape)
1174          for p in range(num_prompts):
1175  +           single_prompt_embeds, single_negative_prompt_embeds = self.encode_prompt(
1176                  prompt,
1177                  device,
1178                  num_videos_per_prompt,
1179                  do_classifier_free_guidance,
1180                  negative_prompt,
1181  +               prompt_embeds=single_prompt_embeds[p].unsqueeze(0),
1182  +               negative_prompt_embeds=single_negative_prompt_embeds[p].unsqueeze(0),
1183                  lora_scale=text_encoder_lora_scale,
1184                  clip_skip=clip_skip,
1185              )

NOTE(review): the added line 1175 assigns `single_prompt_embeds` / `single_negative_prompt_embeds` from `encode_prompt(...)`, yet the added lines 1181-1182 pass `single_prompt_embeds[p]` and `single_negative_prompt_embeds[p]` as arguments to that same call — on the first loop iteration these names appear to be used before they are defined. Confirm against the full file.
|
|
New version (lines 1189-1195):

1189          # to avoid doing two forward passes
1190          if do_classifier_free_guidance:
1191              # concatenate negative prompt embeddings with prompt embeddings on a new dimension after the first batch dimension
1192  +           prompt_embeds = torch.stack([single_negative_prompt_embeds, single_prompt_embeds], dim=1)
1193
1194          prompt_embeds_list.append(prompt_embeds)
1195