import gradio as gr
from base64 import b64encode
import numpy
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from PIL import Image
from torch import autocast
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging
import torchvision.transforms as T
torch.manual_seed(1)
logging.set_verbosity_error()
torch_device = "cpu"
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
# Load the tokenizer and text encoder to tokenize and encode the text.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
# The UNet model for generating the latents.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
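# Note: these LMSDiscreteScheduler settings (scaled_linear betas from 0.00085 to 0.012
# over 1000 training timesteps) mirror the noise schedule the v1.x Stable Diffusion
# checkpoints were trained with.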
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)
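# The token and position embedding layers are pulled out of CLIP so that prompt
# embeddings can be built manually: the learned style vector loaded later is swapped
# in for the placeholder token (the hard-coded id 6829 below) before the text
# transformer runs.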
def pil_to_latent(input_im):
    # Single image -> single latent in a batch (so shape 1, 4, 64, 64)
    with torch.no_grad():
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # Note scaling to [-1, 1]
    return 0.18215 * latent.latent_dist.sample()
def latents_to_pil(latents):
    # Batch of latents -> list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
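# Example round trip (illustrative only; "photo.png" is a hypothetical input file):
#   latent = pil_to_latent(Image.open("photo.png").convert("RGB").resize((512, 512)))
#   recon = latents_to_pil(latent)[0]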
def get_output_embeds(input_embeddings):
    # CLIP's text model uses a causal mask, so we prepare it here:
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    # Getting the output embeddings involves calling the model with output_hidden_states=True
    # so that it doesn't just return the pooled final predictions:
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,  # We aren't using an attention mask, so that can be None
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,  # We want the output embeddings, not just the final output
        return_dict=None,
    )

    # We're interested in the output hidden state only
    output = encoder_outputs[0]

    # There is a final layer norm we need to pass these through
    output = text_encoder.text_model.final_layer_norm(output)

    # And now they're ready!
    return output
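# get_output_embeds reproduces what calling text_encoder(input_ids) would return, but
# starting from (possibly modified) input embeddings instead of raw token ids.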
def generate_with_embs(text_embeddings, seed, max_length):
    height = 512                          # default height of Stable Diffusion
    width = 512                           # default width of Stable Diffusion
    num_inference_steps = 10              # Number of denoising steps
    guidance_scale = 7.5                  # Scale for classifier-free guidance
    generator = torch.manual_seed(seed)   # Seed generator to create the initial latent noise
    batch_size = 1

    # Unconditional (empty-prompt) embeddings for classifier-free guidance
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep scheduler
    set_timesteps(scheduler, num_inference_steps)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Sampling loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so the unconditional and conditional passes run in one forward call
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Compute the previous noisy sample x_t -> x_t-1
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
# Prep scheduler
def set_timesteps(scheduler, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)  # minor fix to ensure MPS compatibility, fixed in diffusers PR 3925
def embed_style(prompt, style_embed, style_seed):
    # Tokenize
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    # Get token embeddings; shape [batch_size, seq_length, embedding_dim]
    token_embeddings = token_emb_layer(input_ids)

    # The learned style embedding that will replace the placeholder token
    replacement_token_embedding = style_embed.to(torch_device)
    replacement_token_embedding = replacement_token_embedding[:768]          # Trim to the embedding dimension
    replacement_token_embedding = replacement_token_embedding.unsqueeze(0)   # Make it [1, 768]

    # Insert it at every position of the placeholder token (id 6829)
    indices = torch.where(input_ids[0] == 6829)[0]
    for index in indices:
        token_embeddings[0, index] = replacement_token_embedding.to(torch_device)

    # Combine with positional embeddings
    input_embeddings = token_embeddings + position_embeddings

    # Feed through to get the final output embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # And generate an image with this:
    max_length = text_input.input_ids.shape[-1]
    return generate_with_embs(modified_output_embeddings, style_seed, max_length)
def loss_style(prompt, style_embed, style_seed):
    # Tokenize
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    # Get token embeddings; shape [batch_size, seq_length, embedding_dim]
    token_embeddings = token_emb_layer(input_ids)

    # The learned style embedding that will replace the placeholder token
    replacement_token_embedding = style_embed.to(torch_device)
    replacement_token_embedding = replacement_token_embedding[:768]          # Trim to the embedding dimension
    replacement_token_embedding = replacement_token_embedding.unsqueeze(0)   # Make it [1, 768]

    # Insert it at every position of the placeholder token (id 6829)
    indices = torch.where(input_ids[0] == 6829)[0]
    print(f"indices: {indices}")  # Debug print
    for index in indices:
        print(f"index: {index}")  # Debug print
        token_embeddings[0, index] = replacement_token_embedding.to(torch_device)

    # Combine with positional embeddings
    input_embeddings = token_embeddings + position_embeddings

    # Feed through to get the final output embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # And generate an image with this, using the loss-guided sampler:
    max_length = text_input.input_ids.shape[-1]
    return generate_loss_based_image(modified_output_embeddings, style_seed, max_length)
def sepia_loss(images):
    # Project each pixel onto a sepia tone and penalize its mean distance from mid-grey (0.5)
    sepia_tone = 0.393 * images[:, 0] + 0.769 * images[:, 1] + 0.189 * images[:, 2]
    error = torch.abs(sepia_tone - 0.5).mean()
    return error
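# Loss-guided generation: every few denoising steps the predicted x0 is decoded to image
# space, sepia_loss is evaluated on it, and the latents are nudged down the loss gradient
# (scaled by sigma**2) before the normal scheduler step continues.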
def generate_loss_based_image(text_embeddings, seed, max_length):
    height = 64
    width = 64
    num_inference_steps = 10
    guidance_scale = 8
    generator = torch.manual_seed(64)
    batch_size = 1
    loss_scale = 200

    # Unconditional (empty-prompt) embeddings for classifier-free guidance
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep scheduler
    set_timesteps(scheduler, num_inference_steps + 1)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Sampling loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so the unconditional and conditional passes run in one forward call
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        ### ADDITIONAL GUIDANCE ###
        if i % 5 == 0 and i > 0:
            # Require grad on the latents
            latents = latents.detach().requires_grad_()

            # Get the predicted x0 (rewind the step index so this extra .step() call doesn't advance the scheduler)
            scheduler._step_index -= 1
            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample

            # Decode to image space
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5  # range (0, 1)

            # Calculate loss
            loss = sepia_loss(denoised_images) * loss_scale
            print(i, 'loss:', loss)

            # Get the gradient of the loss with respect to the latents
            cond_grad = torch.autograd.grad(loss, latents)[0]

            # Nudge the latents down the gradient, scaled by sigma**2
            latents = latents.detach() - cond_grad * sigma**2

            # To PIL images (kept for debugging/inspection; not used further)
            im_t0 = latents_to_pil(latents_x0)[0]
            im_next = latents_to_pil(latents)[0]

        # Now step with the scheduler
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
def generate_image_from_prompt(text_in, style_in):
    STYLE_LIST = ['learned_embeds_gartic-phone_style.bin', 'learned_embeds_hawaiian-shirt_style.bin', 'learned_embeds_phone01_style.bin', 'learned_embeds_style-spdmn_style.bin', 'learned_embedssd_yvmqznrm_style.bin']
    STYLE_SEEDS = [128, 64, 128, 64, 128]

    print(text_in)
    print(style_in)

    style_file = style_in + '_style.bin'
    idx = STYLE_LIST.index(style_file)
    print(style_file)
    print(idx)

    prompt = text_in
    style_seed = STYLE_SEEDS[idx]

    # Load the learned textual-inversion embedding for the selected style
    style_dict = torch.load(style_file)
    style_embed = [v for v in style_dict.values()]

    generated_image = embed_style(prompt, style_embed[0], style_seed)
    loss_generated_img = loss_style(prompt, style_embed[0], style_seed)

    return [generated_image, loss_generated_img]
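# Hypothetical direct call (outside the Gradio UI), assuming the style .bin files are present:
#   images = generate_image_from_prompt("A campfire (oil on canvas)", "learned_embeds_gartic-phone")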
# Define the Gradio interface
title = 'Stable Diffusion Art Generator'

# Clear and concise labels and instructions
prompt_label = "Enter a prompt (e.g., 'A campfire (oil on canvas)')"
styles_label = "Select a Pretrained Style:"
instructions = "Explore creative art generation using Stable Diffusion. Enter a prompt and choose a style to get started."

demo = gr.Interface(
    generate_image_from_prompt,
    inputs=[
        gr.Textbox('A campfire (oil on canvas)', label=prompt_label),
        gr.Dropdown(
            ['learned_embeds_gartic-phone', 'learned_embeds_hawaiian-shirt', 'learned_embeds_phone01', 'learned_embeds_style-spdmn', 'learned_embedssd_yvmqznrm'],
            value="learned_embeds_gartic-phone",
            label=styles_label,
        ),
    ],
    outputs=[
        gr.Gallery(label="Generated Images", show_label=False, elem_id="gallery", columns=[2], rows=[2],
                   object_fit="contain"),
    ],
    title=title,
    description=instructions,
)

demo.launch(debug=True)