import os
import numpy
import torch
import torch.nn.functional as F
from torch import autocast
from torchvision import transforms as tfms

import PIL
from PIL import Image

from diffusers import (
    AutoencoderKL,
    KDPM2DiscreteScheduler,
    LMSDiscreteScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, logging

from IPython.display import HTML
from matplotlib import pyplot as plt
from pathlib import Path
from tqdm.auto import tqdm
import cv2

# Load the target QR code and prepare it as a 3x512x512 tensor in [0, 1].
bb = cv2.imread("./qr_code1.png")
assert bb is not None, "qr_code1.png not found"  # cv2.imread fails silently
bb = cv2.cvtColor(bb, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; everything else here expects RGB

tfm2 = tfms.Compose([
    tfms.ToTensor(),          # HWC uint8 -> CHW float in [0, 1]
    tfms.Resize([512, 512]),  # match the generation resolution
    tfms.CenterCrop(512),
])
img2 = tfm2(bb)

device = "cuda" if torch.cuda.is_available() else "cpu"
pretrained_model_name_or_path = "CompVis/stable-diffusion-v1-4"

# Load the individual Stable Diffusion components so the sampling loop can be
# written by hand.
vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae")

# Stable Diffusion v1 uses OpenAI's CLIP ViT-L/14 as its text encoder.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

unet = UNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder="unet")

scheduler = LMSDiscreteScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
)

vae = vae.to(device)
text_encoder = text_encoder.to(device)
unet = unet.to(device)

# The assembled fp16 pipeline is loaded separately; the custom loop below
# does not use it.
pipe = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16).to(device)
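
# Quick smoke test (a sketch): since the guided loop below never calls the
# fp16 pipeline, one plain generation is an easy check that the weights
# loaded correctly. fp16 inference assumes a CUDA device, hence the guard.
if device == "cuda":
    smoke_test = pipe("a photo of an astronaut riding a horse").images[0]
    smoke_test.save("pipe_smoke_test.png")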

# Learned textual-inversion embeddings for several styles (each file maps one
# placeholder token to one CLIP embedding).
birb_embed = torch.load('./birb-style/learned_embeds.bin')
herge_embed = torch.load('./herge-style/learned_embeds.bin')
indian_water_color_embed = torch.load('./indian-watercolor-portraits/learned_embeds.bin')
midjourney_embed = torch.load('./midjourney-style/learned_embeds.bin')
marc_allante_embed = torch.load('./style-of-marc-allante/learned_embeds.bin')

# Fixed seed per style so results are reproducible.
style_seeds = {
    'birb': 321,
    'herge': 1,
    'indian_watercolor': 42,
    'midjourney': 8081,
    'marc_allante': 100,
}
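
# A minimal sketch of making the learned styles usable in prompts: register
# each placeholder token with the tokenizer and copy its learned vector into
# the text encoder's embedding table. Each learned_embeds.bin is assumed to
# map one placeholder token (e.g. '<birb-style>') to one 768-d embedding,
# the usual textual-inversion format; the exact token names depend on how
# the concepts were trained.
def register_style_embedding(learned_embeds):
    token, vector = next(iter(learned_embeds.items()))
    tokenizer.add_tokens(token)
    text_encoder.resize_token_embeddings(len(tokenizer))
    token_id = tokenizer.convert_tokens_to_ids(token)
    emb_weight = text_encoder.get_input_embeddings().weight
    emb_weight.data[token_id] = vector.to(emb_weight.device, emb_weight.dtype)
    return token

for _embeds in (birb_embed, herge_embed, indian_water_color_embed,
                midjourney_embed, marc_allante_embed):
    register_style_embedding(_embeds)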

def qr_loss(images, qr_img):
    """Mean absolute error between a batch of decoded images and the QR target."""
    qr_img = qr_img.unsqueeze(0).to(device)  # add a batch dim to match `images`
    error = F.l1_loss(images, qr_img, reduction='mean')
    return error
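
# Sanity check: the target compared with itself should give zero loss.
assert qr_loss(img2.unsqueeze(0).to(device), img2).item() == 0.0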

def set_timesteps(scheduler, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    # Cast to float32, a workaround kept from the original notebook so the
    # LMSDiscreteScheduler timesteps index cleanly (mainly an MPS issue).
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)

def pil_to_latent(input_im):
    """Encode a PIL image into scaled VAE latents."""
    with torch.no_grad():
        # Map [0, 1] pixels to [-1, 1] before encoding, as the VAE expects.
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(device) * 2 - 1)
    return 0.18215 * latent.latent_dist.sample()


def latents_to_pil(latents):
    """Decode scaled latents back into a list of PIL images."""
    latents = (1 / 0.18215) * latents  # undo the SD latent scaling factor
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)  # [-1, 1] -> [0, 1]
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
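
# Round-trip check (a sketch): encoding the QR image and decoding it back
# should closely reproduce the input if the 0.18215 scaling is consistent.
qr_pil = Image.fromarray(bb).resize((512, 512))
qr_recon = latents_to_pil(pil_to_latent(qr_pil))[0]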

def get_output_embeds(input_embeddings):
    """Run pre-computed token embeddings through the rest of the CLIP text model."""
    bsz, seq_len = input_embeddings.shape[:2]

    # CLIP's text model uses causal masking, so build that mask here.
    causal_attention_mask = build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,
        causal_attention_mask=causal_attention_mask.to(device),
        output_attentions=None,
        output_hidden_states=True,
        return_dict=None,
    )

    # The encoder's last hidden state, followed by the final layer norm,
    # matches what text_encoder(input_ids)[0] would return.
    output = encoder_outputs[0]
    output = text_encoder.text_model.final_layer_norm(output)
    return output


def build_causal_attention_mask(bsz, seq_len, dtype):
    """Lower-triangular causal mask (mirrors the private helper removed from recent transformers)."""
    mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
    mask.fill_(torch.finfo(dtype).min)  # mask out everything...
    mask.triu_(1)                       # ...except the lower triangle (zeros)
    mask = mask.unsqueeze(1)            # add a head dimension
    return mask
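
# A sketch of the usual role of get_output_embeds: swap a learned style vector
# into the token-embedding sequence of a prompt, then run the modified
# sequence through the rest of the text encoder. The slot index (1, right
# after the start-of-text token) and the helper name are illustrative.
def embed_prompt_with_style(prompt, style_vector):
    text_input = tokenizer([prompt], padding="max_length",
                           max_length=tokenizer.model_max_length,
                           truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(device)
    emb = text_encoder.text_model.embeddings
    with torch.no_grad():
        token_embeddings = emb.token_embedding(input_ids)
        token_embeddings[0, 1] = style_vector.to(device)  # overwrite one slot
        position_ids = emb.position_ids[:, :input_ids.shape[-1]]
        input_embeddings = token_embeddings + emb.position_embedding(position_ids)
        return get_output_embeds(input_embeddings)

# e.g. embed_prompt_with_style("a portrait", next(iter(birb_embed.values())).to(device))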

def generate_with_embs_custom_loss(prompt, text_embeddings, seed):
    """Sample an image with classifier-free guidance plus QR-code loss guidance.

    If `text_embeddings` is None, the prompt is encoded with the CLIP text
    encoder; otherwise the supplied embeddings (e.g. style-modified ones) are
    used as the conditional branch.
    """
    height = 512
    width = 512
    num_inference_steps = 50
    guidance_scale = 11
    generator = torch.manual_seed(seed)
    batch_size = 1
    qr_loss_scale = 100  # weight of the QR loss against the diffusion prior

    # Conditional embeddings: use the ones passed in, or encode the prompt.
    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    if text_embeddings is None:
        with torch.no_grad():
            text_embeddings = text_encoder(text_input.input_ids.to(device))[0]

    # Unconditional (empty-prompt) embeddings for classifier-free guidance.
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    set_timesteps(scheduler, num_inference_steps)

    # Start from scaled Gaussian noise in latent space (512 / 8 = 64).
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(device)
    latents = latents * scheduler.init_noise_sigma

    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Duplicate the latents so one UNet pass covers both guidance branches.
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance.
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Every other step, nudge the latents toward the target QR code.
        if i % 2 == 0:
            latents = latents.detach().requires_grad_()

            # Predict the fully denoised latents x0 from the current noise estimate.
            latents_x0 = latents - sigma * noise_pred

            # Decode to image space, rescaled to [0, 1], so the loss is pixel-wise.
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5

            loss = qr_loss(denoised_images, img2) * qr_loss_scale
            if i % 10 == 0:
                print(i, 'loss:', loss.item())

            # Gradient of the loss w.r.t. the latents, applied as a guidance step.
            cond_grad = torch.autograd.grad(loss, latents)[0]
            latents = latents.detach() - cond_grad * sigma**2

        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
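
# Example driver (a sketch): one QR-guided image per style, using the seeds
# above. The prompt is illustrative; pass None to encode it directly, or pass
# embeddings built with a helper like embed_prompt_with_style.
prompt = "a portrait of a wise old man"  # hypothetical prompt
for style_name, style_seed in style_seeds.items():
    result = generate_with_embs_custom_loss(prompt, None, style_seed)
    result.save(f"qr_{style_name}.png")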