|
from typing import Any, Callable, Dict, List, Optional, Union |
|
|
|
import torch |
|
import torch.nn.functional as F |
|
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback |
|
from diffusers.image_processor import PipelineImageInput |
|
from diffusers.models import AutoencoderKL, UNet2DConditionModel |
|
from diffusers.pipelines.stable_diffusion.pipeline_output import ( |
|
StableDiffusionPipelineOutput, |
|
) |
|
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import ( |
|
StableDiffusionPipeline, |
|
rescale_noise_cfg, |
|
retrieve_timesteps, |
|
) |
|
from diffusers.pipelines.stable_diffusion.safety_checker import ( |
|
StableDiffusionSafetyChecker, |
|
) |
|
from diffusers.schedulers import KarrasDiffusionSchedulers |
|
from diffusers.utils import deprecate |
|
from transformers import ( |
|
CLIPImageProcessor, |
|
CLIPTextModel, |
|
CLIPTokenizer, |
|
CLIPVisionModel, |
|
) |
|
|
|
from attention_processor import add_imagedream_attn_processor |
|
from camera_utils import get_camera |
|
|
|
|
|
class ImageDreamPipeline(StableDiffusionPipeline): |
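    """A multi-view variant of Stable Diffusion in the style of ImageDream.

    Several camera views of the same object are denoised jointly by the multi-view attention
    processors installed on the UNet. An optional reference image conditions the generation
    both through an IP-Adapter image prompt and by pinning one extra view's latents to the
    VAE encoding of that image.
    """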
|
def __init__( |
|
self, |
|
vae: AutoencoderKL, |
|
text_encoder: CLIPTextModel, |
|
tokenizer: CLIPTokenizer, |
|
unet: UNet2DConditionModel, |
|
scheduler: KarrasDiffusionSchedulers, |
|
safety_checker: StableDiffusionSafetyChecker, |
|
feature_extractor: CLIPImageProcessor, |
|
        image_encoder: Optional[CLIPVisionModel] = None,
|
requires_safety_checker: bool = False, |
|
) -> None: |
|
super().__init__( |
|
vae=vae, |
|
text_encoder=text_encoder, |
|
tokenizer=tokenizer, |
|
unet=add_imagedream_attn_processor(unet), |
|
scheduler=scheduler, |
|
safety_checker=safety_checker, |
|
feature_extractor=feature_extractor, |
|
image_encoder=image_encoder, |
|
requires_safety_checker=requires_safety_checker, |
|
) |
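        # ImageDream denoises several views of the same object jointly; four views is the
        # default and can be overridden per call via `cross_attention_kwargs={"num_views": ...}`.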
|
self.num_views = 4 |
|
|
|
def load_ip_adapter( |
|
self, |
|
pretrained_model_name_or_path_or_dict: Union[ |
|
str, List[str], Dict[str, torch.Tensor] |
|
], |
|
subfolder: Union[str, List[str]], |
|
weight_name: Union[str, List[str]], |
|
image_encoder_folder: Optional[str] = "image_encoder", |
|
**kwargs, |
|
): |
|
super().load_ip_adapter( |
|
pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, |
|
subfolder=subfolder, |
|
weight_name=weight_name, |
|
image_encoder_folder=image_encoder_folder, |
|
**kwargs, |
|
) |
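        # `load_ip_adapter` replaces the UNet attention processors, so the ImageDream
        # multi-view processors have to be re-installed afterwards.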
|
add_imagedream_attn_processor(self.unet) |
|
|
|
def encode_image_to_latents( |
|
self, |
|
image: PipelineImageInput, |
|
height: int, |
|
width: int, |
|
device: torch.device, |
|
num_images_per_prompt: int = 1, |
|
): |
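        # Encode the conditioning image into VAE latents. A zero image is encoded alongside it
        # to provide the unconditional latents used for classifier-free guidance.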
|
dtype = next(self.vae.parameters()).dtype |
|
|
|
if isinstance(image, torch.Tensor): |
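            # Tensor inputs are resized directly (with antialiasing); other inputs go through
            # the pipeline's image processor.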
|
image = F.interpolate( |
|
image, |
|
(height, width), |
|
mode="bilinear", |
|
align_corners=False, |
|
antialias=True, |
|
) |
|
else: |
|
image = self.image_processor.preprocess(image, height, width) |
|
|
|
|
|
image = image.to(device=device, dtype=dtype) |
|
|
|
def vae_encode(image): |
|
posterior = self.vae.encode(image).latent_dist |
|
latents = posterior.sample() * self.vae.config.scaling_factor |
|
latents = latents.repeat_interleave(num_images_per_prompt, dim=0) |
|
return latents |
|
|
|
latents = vae_encode(image) |
|
uncond_latents = vae_encode(torch.zeros_like(image)) |
|
return latents, uncond_latents |
|
|
|
@torch.no_grad() |
|
def __call__( |
|
self, |
|
prompt: Union[str, List[str]] = None, |
|
height: Optional[int] = None, |
|
width: Optional[int] = None, |
|
num_inference_steps: int = 50, |
|
elevation: float = 0.0, |
|
timesteps: List[int] = None, |
|
sigmas: List[float] = None, |
|
guidance_scale: float = 7.5, |
|
negative_prompt: Optional[Union[str, List[str]]] = None, |
|
num_images_per_prompt: Optional[int] = 1, |
|
eta: float = 0.0, |
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, |
|
latents: Optional[torch.Tensor] = None, |
|
prompt_embeds: Optional[torch.Tensor] = None, |
|
negative_prompt_embeds: Optional[torch.Tensor] = None, |
|
ip_adapter_image: Optional[PipelineImageInput] = None, |
|
|
|
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, |
|
output_type: Optional[str] = "pil", |
|
return_dict: bool = True, |
|
cross_attention_kwargs: Optional[Dict[str, Any]] = None, |
|
guidance_rescale: float = 0.0, |
|
clip_skip: Optional[int] = None, |
|
callback_on_step_end: Optional[ |
|
Union[ |
|
Callable[[int, int, Dict], None], |
|
PipelineCallback, |
|
MultiPipelineCallbacks, |
|
] |
|
] = None, |
|
callback_on_step_end_tensor_inputs: List[str] = ["latents"], |
|
**kwargs, |
|
): |
|
if ip_adapter_image_embeds is not None: |
|
            raise ValueError(
                "`ip_adapter_image_embeds` is not supported by ImageDream; pass `ip_adapter_image` instead."
            )
|
|
|
callback = kwargs.pop("callback", None) |
|
callback_steps = kwargs.pop("callback_steps", None) |
|
|
|
if callback is not None: |
|
deprecate( |
|
"callback", |
|
"1.0.0", |
|
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", |
|
) |
|
if callback_steps is not None: |
|
deprecate( |
|
"callback_steps", |
|
"1.0.0", |
|
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", |
|
) |
|
|
|
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): |
|
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs |
|
|
|
|
|
        # The number of views may be overridden through `cross_attention_kwargs`; pop it here
        # and re-insert it before the denoising loop so the attention processors receive it.
        if cross_attention_kwargs is None:
            num_views = self.num_views
        else:
            num_views = cross_attention_kwargs.pop("num_views", self.num_views)
|
|
|
|
|
height = height or self.unet.config.sample_size * self.vae_scale_factor |
|
width = width or self.unet.config.sample_size * self.vae_scale_factor |
|
|
|
|
|
|
|
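        # Check the standard pipeline inputs. ImageDream can be driven purely by an image
        # prompt, so an empty text prompt is substituted when none is given.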
if prompt is None: |
|
prompt = "" |
|
self.check_inputs( |
|
prompt, |
|
height, |
|
width, |
|
callback_steps, |
|
negative_prompt, |
|
prompt_embeds, |
|
negative_prompt_embeds, |
|
ip_adapter_image, |
|
None, |
|
callback_on_step_end_tensor_inputs, |
|
) |
|
|
|
self._guidance_scale = guidance_scale |
|
self._guidance_rescale = guidance_rescale |
|
self._clip_skip = clip_skip |
|
self._cross_attention_kwargs = cross_attention_kwargs |
|
self._interrupt = False |
|
|
|
|
|
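        # Derive the batch size from the prompt (or from precomputed prompt embeddings).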
if prompt is not None and isinstance(prompt, str): |
|
batch_size = 1 |
|
elif prompt is not None and isinstance(prompt, list): |
|
batch_size = len(prompt) |
|
else: |
|
batch_size = prompt_embeds.shape[0] |
|
|
|
device = self._execution_device |
|
|
|
|
|
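        # Encode the input prompt (the LoRA scale, if any, is forwarded to the text encoder).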
lora_scale = ( |
|
self.cross_attention_kwargs.get("scale", None) |
|
if self.cross_attention_kwargs is not None |
|
else None |
|
) |
|
|
|
prompt_embeds, negative_prompt_embeds = self.encode_prompt( |
|
prompt, |
|
device, |
|
num_images_per_prompt, |
|
self.do_classifier_free_guidance, |
|
negative_prompt, |
|
prompt_embeds=prompt_embeds, |
|
negative_prompt_embeds=negative_prompt_embeds, |
|
lora_scale=lora_scale, |
|
clip_skip=self.clip_skip, |
|
) |
|
|
|
|
|
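        # Build the camera matrices for the requested views; when an image prompt is given,
        # an extra reference view is appended.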
camera = get_camera( |
|
num_views, elevation=elevation, extra_view=ip_adapter_image is not None |
|
).to(dtype=prompt_embeds.dtype, device=device) |
|
camera = camera.repeat(batch_size * num_images_per_prompt, 1) |
|
|
|
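        # An image prompt is encoded twice: through the IP-Adapter image encoder (for the
        # cross-attention image embeddings) and through the VAE (those latents replace the
        # extra reference view during denoising).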
if ip_adapter_image is not None: |
|
image_embeds = self.prepare_ip_adapter_image_embeds( |
|
ip_adapter_image, |
|
None, |
|
device, |
|
batch_size * num_images_per_prompt, |
|
self.do_classifier_free_guidance, |
|
) |
|
|
|
image_latents, negative_image_latents = self.encode_image_to_latents( |
|
ip_adapter_image, |
|
height, |
|
width, |
|
device, |
|
batch_size * num_images_per_prompt, |
|
) |
|
num_views += 1 |
|
|
|
|
|
|
|
|
|
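        # Duplicate the conditioning for classifier-free guidance, then tile the prompt
        # (and image) embeddings so there is one copy per view.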
if self.do_classifier_free_guidance: |
|
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) |
|
camera = torch.cat([camera] * 2) |
|
if ip_adapter_image is not None: |
|
image_latents = torch.cat([negative_image_latents, image_latents]) |
|
|
|
|
|
prompt_embeds = prompt_embeds.repeat_interleave(num_views, dim=0) |
|
if ip_adapter_image is not None: |
|
image_embeds = [i.repeat_interleave(num_views, dim=0) for i in image_embeds] |
|
|
|
|
|
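        # Prepare the scheduler timesteps.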
timesteps, num_inference_steps = retrieve_timesteps( |
|
self.scheduler, num_inference_steps, device, timesteps, sigmas |
|
) |
|
|
|
|
|
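        # Prepare the initial latents: one latent image per view and per prompt.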
num_channels_latents = self.unet.config.in_channels |
|
latents = self.prepare_latents( |
|
batch_size * num_images_per_prompt * num_views, |
|
num_channels_latents, |
|
height, |
|
width, |
|
prompt_embeds.dtype, |
|
device, |
|
generator, |
|
latents, |
|
) |
|
|
|
|
|
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) |
|
|
|
|
|
if ip_adapter_image is not None: |
|
added_cond_kwargs = {"image_embeds": image_embeds} |
|
else: |
|
added_cond_kwargs = None |
|
|
|
|
|
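        # Optionally embed the guidance scale (only used by UNets with `time_cond_proj_dim`,
        # e.g. LCM-style models).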
timestep_cond = None |
|
if self.unet.config.time_cond_proj_dim is not None: |
|
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(
                batch_size * num_images_per_prompt * num_views
            )
|
timestep_cond = self.get_guidance_scale_embedding( |
|
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim |
|
).to(device=device, dtype=latents.dtype) |
|
|
|
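        # The ImageDream attention processors read `num_views` from `cross_attention_kwargs`.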
cross_attention_kwargs = {"num_views": num_views} |
|
if self.cross_attention_kwargs is not None: |
|
cross_attention_kwargs.update(self.cross_attention_kwargs) |
|
|
|
|
|
|
|
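        # Denoising loop.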
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order |
|
self._num_timesteps = len(timesteps) |
|
with self.progress_bar(total=num_inference_steps) as progress_bar: |
|
for i, t in enumerate(timesteps): |
|
if self.interrupt: |
|
continue |
|
|
|
|
|
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents |
|
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) |
|
|
|
if ip_adapter_image is not None: |
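                    # Pin the extra reference view to the encoded input-image latents at
                    # every step.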
|
latent_model_input[num_views - 1 :: num_views, :, :, :] = image_latents |
|
|
|
noise_pred = self.unet( |
|
latent_model_input, |
|
t, |
|
class_labels=camera, |
|
encoder_hidden_states=prompt_embeds, |
|
timestep_cond=timestep_cond, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
added_cond_kwargs=added_cond_kwargs, |
|
return_dict=False, |
|
)[0] |
|
|
|
|
|
if self.do_classifier_free_guidance: |
|
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) |
|
noise_pred = torch.lerp(noise_pred_uncond, noise_pred_text, self.guidance_scale) |
|
|
|
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: |
|
|
|
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) |
|
|
|
|
|
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] |
|
|
|
if callback_on_step_end is not None: |
|
callback_kwargs = {} |
|
for k in callback_on_step_end_tensor_inputs: |
|
callback_kwargs[k] = locals()[k] |
|
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) |
|
|
|
latents = callback_outputs.pop("latents", latents) |
|
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) |
|
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) |
|
|
|
|
|
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): |
|
progress_bar.update() |
|
if callback is not None and i % callback_steps == 0: |
|
step_idx = i // getattr(self.scheduler, "order", 1) |
|
callback(step_idx, t, latents) |
|
|
|
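        # Decode the latents and run the safety checker (unless latents were requested).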
if not output_type == "latent": |
|
image = self.vae.decode( |
|
latents / self.vae.config.scaling_factor, |
|
return_dict=False, |
|
generator=generator, |
|
)[0] |
|
image, has_nsfw_concept = self.run_safety_checker( |
|
image, device, prompt_embeds.dtype |
|
) |
|
else: |
|
image = latents |
|
has_nsfw_concept = None |
|
|
|
if has_nsfw_concept is None: |
|
do_denormalize = [True] * image.shape[0] |
|
else: |
|
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] |
|
|
|
image = self.image_processor.postprocess( |
|
image, output_type=output_type, do_denormalize=do_denormalize |
|
) |
|
|
|
|
|
self.maybe_free_model_hooks() |
|
|
|
if not return_dict: |
|
return (image, has_nsfw_concept) |
|
|
|
return StableDiffusionPipelineOutput( |
|
images=image, nsfw_content_detected=has_nsfw_concept |
|
) |
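

# Example usage (a minimal sketch): the checkpoint path, IP-Adapter subfolder, and weight file
# name below are placeholders that depend on how the converted ImageDream weights are packaged.
#
#   import torch
#   from PIL import Image
#
#   pipe = ImageDreamPipeline.from_pretrained(
#       "path/to/imagedream-diffusers-checkpoint",  # hypothetical path
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   pipe.load_ip_adapter(
#       "path/to/imagedream-diffusers-checkpoint",  # hypothetical path
#       subfolder="ip_adapter",                     # assumed layout
#       weight_name="ip_adapter.bin",               # assumed file name
#   )
#
#   reference = Image.open("object.png").convert("RGB")
#   views = pipe(
#       prompt="a DSLR photo of a toy robot",
#       ip_adapter_image=reference,
#       num_inference_steps=50,
#       guidance_scale=5.0,
#   ).images  # one image per generated view; the last one corresponds to the reference view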
|
|