import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import paddle
import PIL

from paddlenlp.transformers import CLIPTextModel, CLIPTokenizer

from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from ...schedulers import (
    DDIMScheduler,
    DDPMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import logging

logger = logging.get_logger(__name__)


def preprocess(image):
    if isinstance(image, paddle.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        # floor width and height to the nearest multiple of 64
        w, h = map(lambda x: x - x % 64, (w, h))

        image = [np.array(i.resize((w, h)))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        # NHWC -> NCHW, then rescale from [0, 1] to [-1, 1]
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = paddle.to_tensor(image)
    elif isinstance(image[0], paddle.Tensor):
        image = paddle.concat(image, axis=0)
    return image
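
# Shape sketch (illustrative, not part of the pipeline API): a single 512x512 RGB
# PIL image comes out of `preprocess` as a [1, 3, 512, 512] float32 tensor scaled
# to [-1, 1]; width and height are floored to multiples of 64 first, so e.g. a
# 530x514 input is resized to 512x512 before conversion.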


class StableDiffusionUpscalePipeline(DiffusionPipeline):
    r"""
    Pipeline for text-guided image super-resolution using Stable Diffusion 2.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        low_res_scheduler ([`SchedulerMixin`]):
            A scheduler used to add initial noise to the low-resolution conditioning image. It must be an instance of
            [`DDPMScheduler`].
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
        )
        self.register_to_config(max_noise_level=max_noise_level)

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier-free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pd",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
            text_input_ids, untruncated_ids
        ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        config = (
            self.text_encoder.config
            if isinstance(self.text_encoder.config, dict)
            else self.text_encoder.config.to_dict()
        )
        if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
            attention_mask = text_inputs.attention_mask
        else:
            attention_mask = None

        text_embeddings = self.text_encoder(
            text_input_ids,
            attention_mask=attention_mask,
        )
        text_embeddings = text_embeddings[0]

        # duplicate text embeddings for each generation per prompt
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
        text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])

        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pd",
            )

            if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
                attention_mask = uncond_input.attention_mask
            else:
                attention_mask = None

            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids,
                attention_mask=attention_mask,
            )
            uncond_embeddings = uncond_embeddings[0]

            # duplicate unconditional embeddings for each generation per prompt
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
            uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])

            # For classifier-free guidance we need two forward passes: here the unconditional and text
            # embeddings are concatenated into a single batch so one U-Net pass serves both.
            text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])

        return text_embeddings
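
    # Shape sketch for `_encode_prompt` (the hidden size depends on the text
    # encoder checkpoint; 77 assumes CLIP's usual `model_max_length`): for a
    # single prompt with `num_images_per_prompt=2` and guidance enabled, the
    # result stacks [uncond, uncond, text, text] along batch -> [4, 77, hidden_dim].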
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler and will be ignored by other schedulers.
        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502) and should be in [0, 1].
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts a generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def decode_latents(self, latents):
        # undo the latent scaling factor (0.08333) before decoding with the VAE
        latents = 1 / 0.08333 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clip(0, 1)

        # cast to float32 and move channels last for numpy/PIL compatibility
        image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
        return image

    def check_inputs(self, prompt, image, noise_level, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if (
            not isinstance(image, paddle.Tensor)
            and not isinstance(image, PIL.Image.Image)
            and not isinstance(image, list)
        ):
            raise ValueError(
                f"`image` has to be of type `paddle.Tensor`, `PIL.Image.Image` or `list` but is {type(image)}"
            )

        # verify that the batch sizes of `prompt` and `image` match
        if isinstance(image, list) or isinstance(image, paddle.Tensor):
            if isinstance(prompt, str):
                batch_size = 1
            else:
                batch_size = len(prompt)
            if isinstance(image, list):
                image_batch_size = len(image)
            else:
                image_batch_size = image.shape[0]
            if batch_size != image_batch_size:
                raise ValueError(
                    f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
                    " Please make sure that passed `prompt` matches the batch size of `image`."
                )

        if noise_level > self.config.max_noise_level:
            raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
        shape = [batch_size, num_channels_latents, height, width]
        if latents is None:
            latents = paddle.randn(shape, generator=generator, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @paddle.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[paddle.Tensor, PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 20,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
        latents: Optional[paddle.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `paddle.Tensor`):
                `Image`, or tensor representing an image batch, which will be upscaled.
            num_inference_steps (`int`, *optional*, defaults to 75):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 9.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images closely linked
                to the text `prompt`, usually at the expense of lower image quality.
            noise_level (`int`, *optional*, defaults to 20):
                The amount of noise added to the low-resolution conditioning image. Has to be <=
                `self.config.max_noise_level`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`paddle.Generator`, *optional*):
                One or a list of `paddle.Generator`(s) to make generation deterministic.
            latents (`paddle.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return an [`ImagePipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`ImagePipelineOutput`] or `tuple`: [`ImagePipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        # 1. Check inputs
        self.check_inputs(prompt, image, noise_level, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # 4. Preprocess image
        image = preprocess(image)
        image = image.cast(text_embeddings.dtype)

        # 5. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        timesteps = self.scheduler.timesteps

        # 6. Add noise to the low-resolution conditioning image
        noise_level = paddle.to_tensor([noise_level], dtype="int64")
        noise = paddle.randn(image.shape, generator=generator, dtype=text_embeddings.dtype)
        image = self.low_res_scheduler.add_noise(image, noise, noise_level)
        batch_multiplier = 2 if do_classifier_free_guidance else 1
        image = paddle.concat([image] * batch_multiplier * num_images_per_prompt)
        noise_level = paddle.concat([noise_level] * image.shape[0])

        # 7. Prepare latent variables (at the resolution of the conditioning image)
        height, width = image.shape[2:]
        num_channels_latents = self.vae.config.latent_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            generator,
            latents,
        )

        # 8. Check that the channel counts of image and latents match the U-Net input
        num_channels_image = image.shape[1]
        if num_channels_latents + num_channels_image != self.unet.config.in_channels:
            raise ValueError(
                f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
                f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
                f" `num_channels_image`: {num_channels_image} "
                f" = {num_channels_latents + num_channels_image}. Please verify the config of"
                " `pipeline.unet` or your `image` input."
            )

        # 9. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 10. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents

                # concat latents and the noised conditioning image in the channel dimension
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                latent_model_input = paddle.concat([latent_model_input, image], axis=1)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input, t, encoder_hidden_states=text_embeddings, class_labels=noise_level
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 11. Post-processing (decode in float32 for numerical stability)
        image = self.decode_latents(latents.cast("float32"))

        # 12. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)