Update pipeline.py: remove leftover debug print statements
pipeline.py  +0 -12
@@ -1169,8 +1169,6 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
         # foreach prompt embed

         prompt_embeds_list = []
-        # print shape of prompt_embeds
-        print(prompt_embeds.shape)
         for p in range(num_prompts):
             single_prompt_embeds, single_negative_prompt_embeds = self.encode_prompt(
                 prompt,
@@ -1189,14 +1187,9 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
             # to avoid doing two forward passes
             if do_classifier_free_guidance:
                 # concatenate negative prompt embeddings with prompt embeddings on a new dimension after the first batch dimension
-                # print shape of single_prompt_embeds
-                print("single prompt embes shape ",single_prompt_embeds.shape)
                 single_prompt_embeds = torch.cat([single_negative_prompt_embeds, single_prompt_embeds])

             prompt_embeds_list.append(single_prompt_embeds)
-            # print shape of single prompt embeds
-            print(single_prompt_embeds.shape)
-

         if ip_adapter_image is not None:
             output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
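Aside on the surviving code in the two hunks above: the loop builds one embedding tensor per prompt and, when classifier-free guidance is enabled, concatenates the negative embedding in front of the positive one, so a single batched UNet forward pass covers both CFG branches. A minimal sketch of that pattern, where encode() is a hypothetical stand-in for the pipeline's encode_prompt() and is assumed to return a (conditional, unconditional) pair of [batch, seq_len, dim] tensors:

    import torch

    def build_prompt_embeds_list(prompts, encode, do_classifier_free_guidance=True):
        # encode(prompt) is an illustrative stand-in for encode_prompt();
        # assumed here to return (cond, uncond) embedding tensors.
        prompt_embeds_list = []
        for prompt in prompts:
            cond, uncond = encode(prompt)
            if do_classifier_free_guidance:
                # Put the unconditional embeddings first, matching the
                # torch.cat([negative, positive]) order in the diff.
                cond = torch.cat([uncond, cond])
            prompt_embeds_list.append(cond)
        return prompt_embeds_list

With a batch size of 1, index 0 of each list entry holds the negative half and index 1 the positive half, which is why the removed debug line below averages prompt_embeds_list[current_prompt_index][1].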
@@ -1441,11 +1434,6 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
                 context_position = current_context_indexes[0] % context_size
                 current_prompt_index = int(context_position / (context_size / num_prompts))

-                # print shape
-                print(prompt_embeds_list[current_prompt_index].shape)
-                # print min and max values of the current prompt embed
-                print("avg", torch.mean(prompt_embeds_list[current_prompt_index][1]))
-
                 # 7 Add image embeds for IP-Adapter
                 added_cond_kwargs = {"image_embeds": image_embeds[min(current_prompt_index, len(image_embeds) - 1)]} if ip_adapter_image is not None else None

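The last hunk sits in the denoising loop, where the pipeline chooses which entry of prompt_embeds_list applies to the current context window: the window's first frame index modulo context_size gives a position within one cycle, and integer division by the per-prompt share (context_size / num_prompts) turns that position into a prompt index. A self-contained sketch with made-up numbers (16-frame cycle, 2 prompts; the real values come from the pipeline's context scheduling, not shown here):

    context_size = 16
    num_prompts = 2

    for first_frame_index in (0, 4, 8, 12, 16, 20):
        context_position = first_frame_index % context_size
        # Each prompt owns context_size / num_prompts = 8 positions of the
        # cycle: positions 0-7 map to prompt 0, positions 8-15 to prompt 1.
        current_prompt_index = int(context_position / (context_size / num_prompts))
        print(first_frame_index, "->", current_prompt_index)
    # prints: 0 -> 0, 4 -> 0, 8 -> 1, 12 -> 1, 16 -> 0, 20 -> 0

The min(current_prompt_index, len(image_embeds) - 1) clamp on the IP-Adapter line then reuses the last image embedding whenever fewer IP-Adapter images than prompts were supplied.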