Update pipeline.py
Browse files — pipeline.py: +3 −1
pipeline.py
CHANGED
@@ -1189,7 +1189,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap

Old version (lines 1189–1195):

1189        # to avoid doing two forward passes
1190        if do_classifier_free_guidance:
1191            # concatenate negative prompt embeddings with prompt embeddings on a new dimension after the first batch dimension
1192  -         single_prompt_embeds = torch.stack([single_negative_prompt_embeds, single_prompt_embeds], dim=  [removed line truncated in page capture]
1193
1194            prompt_embeds_list.append(single_prompt_embeds)
1195
@@ -1437,6 +1437,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap

Old version (lines 1437–1442):

1437        context_position = current_context_indexes[0] % context_size
1438        current_prompt_index = int(context_position / (context_size / num_prompts))
1439
1440        # print min and max values of the current prompt embed
1441        print("avg", torch.mean(prompt_embeds_list[current_prompt_index][1]))
1442
New version (lines 1189–1195):

1189        # to avoid doing two forward passes
1190        if do_classifier_free_guidance:
1191            # concatenate negative prompt embeddings with prompt embeddings on a new dimension after the first batch dimension
1192  +         single_prompt_embeds = torch.stack([single_negative_prompt_embeds, single_prompt_embeds], dim=0)
1193
1194            prompt_embeds_list.append(single_prompt_embeds)
1195
New version (lines 1437–1444):

1437        context_position = current_context_indexes[0] % context_size
1438        current_prompt_index = int(context_position / (context_size / num_prompts))
1439
1440  +     # print shape
1441  +     print(prompt_embeds_list[current_prompt_index].shape)
1442        # print min and max values of the current prompt embed
1443        print("avg", torch.mean(prompt_embeds_list[current_prompt_index][1]))
1444