amildravid4292 committed
Commit 6b8abea · verified · 1 Parent(s): fef9fbe

Update app.py

Files changed (1)
  1. app.py +26 -517
app.py CHANGED
@@ -6,6 +6,7 @@ from torch.utils.data import Dataset, DataLoader
 import gradio as gr
 import sys
 import tqdm
+import uuid
 sys.path.append(os.path.abspath(os.path.join("", "..")))
 import gc
 import warnings
@@ -69,545 +70,53 @@ def load_models(device):
 
     return unet, vae, text_encoder, tokenizer, noise_scheduler
 
-class main():
-    def __init__(self):
-        super(main, self).__init__()
-
-        device = "cuda"
-        mean = torch.load(f"{models_path}/files/mean.pt", map_location=torch.device('cpu')).bfloat16().to(device)
-        std = torch.load(f"{models_path}/files/std.pt", map_location=torch.device('cpu')).bfloat16().to(device)
-        v = torch.load(f"{models_path}/files/V.pt", map_location=torch.device('cpu')).bfloat16().to(device)
-        proj = torch.load(f"{models_path}/files/proj_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
-        df = torch.load(f"{models_path}/files/identity_df.pt")
-        weight_dimensions = torch.load(f"{models_path}/files/weight_dimensions.pt")
-        pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
-
-        self.device = device
-        self.mean = mean
-        self.std = std
-        self.v = v
-        self.proj = proj
-        self.df = df
-        self.weight_dimensions = weight_dimensions
-        self.pinverse = pinverse
-
-        pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
-
-        revision = None
-        rank = 1
-        weight_dtype = torch.bfloat16
-
-        # Load scheduler, tokenizer and models.
-        pipe = StableDiffusionPipeline.from_pretrained("stablediffusionapi/realistic-vision-v51",
-                                                       torch_dtype=torch.float16, safety_checker=None,
-                                                       requires_safety_checker=False).to(device)
-        self.noise_scheduler = pipe.scheduler
-        del pipe
-        self.tokenizer = AutoTokenizer.from_pretrained(
-            pretrained_model_name_or_path, subfolder="tokenizer", revision=revision
-        )
-        self.text_encoder = CLIPTextModel.from_pretrained(
-            pretrained_model_name_or_path, subfolder="text_encoder", revision=revision
-        )
-        self.vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
-        self.unet = UNet2DConditionModel.from_pretrained(
-            pretrained_model_name_or_path, subfolder="unet", revision=revision
-        )
-
-        self.unet.requires_grad_(False)
-        self.unet.to(device, dtype=weight_dtype)
-        self.vae.requires_grad_(False)
-
-        self.text_encoder.requires_grad_(False)
-        self.vae.requires_grad_(False)
-        self.vae.to(device, dtype=weight_dtype)
-        self.text_encoder.to(device, dtype=weight_dtype)
-        print("")
-
-        self.weights = None
-
-        young = get_direction(df, "Young", pinverse, 1000, device)
-        young = debias(young, "Male", df, pinverse, device)
-        young = debias(young, "Pointy_Nose", df, pinverse, device)
-        young = debias(young, "Wavy_Hair", df, pinverse, device)
-        young = debias(young, "Chubby", df, pinverse, device)
-        young = debias(young, "No_Beard", df, pinverse, device)
-        young = debias(young, "Mustache", df, pinverse, device)
-        self.young = young
-
-        pointy = get_direction(df, "Pointy_Nose", pinverse, 1000, device)
-        pointy = debias(pointy, "Young", df, pinverse, device)
-        pointy = debias(pointy, "Male", df, pinverse, device)
-        pointy = debias(pointy, "Wavy_Hair", df, pinverse, device)
-        pointy = debias(pointy, "Chubby", df, pinverse, device)
-        pointy = debias(pointy, "Heavy_Makeup", df, pinverse, device)
-        self.pointy = pointy
-
-        wavy = get_direction(df, "Wavy_Hair", pinverse, 1000, device)
-        wavy = debias(wavy, "Young", df, pinverse, device)
-        wavy = debias(wavy, "Male", df, pinverse, device)
-        wavy = debias(wavy, "Pointy_Nose", df, pinverse, device)
-        wavy = debias(wavy, "Chubby", df, pinverse, device)
-        wavy = debias(wavy, "Heavy_Makeup", df, pinverse, device)
-        self.wavy = wavy
-
-        thick = get_direction(df, "Bushy_Eyebrows", pinverse, 1000, device)
-        thick = debias(thick, "Male", df, pinverse, device)
-        thick = debias(thick, "Young", df, pinverse, device)
-        thick = debias(thick, "Pointy_Nose", df, pinverse, device)
-        thick = debias(thick, "Wavy_Hair", df, pinverse, device)
-        thick = debias(thick, "Mustache", df, pinverse, device)
-        thick = debias(thick, "No_Beard", df, pinverse, device)
-        thick = debias(thick, "Sideburns", df, pinverse, device)
-        thick = debias(thick, "Big_Nose", df, pinverse, device)
-        thick = debias(thick, "Big_Lips", df, pinverse, device)
-        thick = debias(thick, "Black_Hair", df, pinverse, device)
-        thick = debias(thick, "Brown_Hair", df, pinverse, device)
-        thick = debias(thick, "Pale_Skin", df, pinverse, device)
-        thick = debias(thick, "Heavy_Makeup", df, pinverse, device)
-        self.thick = thick
-
-    @torch.no_grad()
-    @spaces.GPU(duration=120)
-    def inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
-        device = self.device
-        self.unet.to(device)
-        self.text_encoder.to(device)
-        self.vae.to(device)
-        self.mean.to(device)
-        self.std.to(device)
-        self.v.to(device)
-        self.proj.to(device)
-        self.weights.to(device)
-
-        network = LoRAw2w(self.weights.bfloat16(), self.mean.bfloat16(), self.std.bfloat16(), self.v[:, :1000].bfloat16(),
-                          self.unet,
-                          rank=1,
-                          multiplier=1.0,
-                          alpha=27.0,
-                          train_method="xattn-strict"
-                          ).to(device, torch.bfloat16)
-
-        generator = torch.Generator(device=device).manual_seed(seed)
-        latents = torch.randn(
-            (1, self.unet.in_channels, 512 // 8, 512 // 8),
-            generator=generator,
-            device=self.device
-        ).bfloat16()
-
-        text_input = self.tokenizer(prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
-        text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
-
-        max_length = text_input.input_ids.shape[-1]
-        uncond_input = self.tokenizer(
-            [negative_prompt], padding="max_length", max_length=max_length, return_tensors="pt"
-        )
-        uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0]
-        text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).bfloat16()
-        self.noise_scheduler.set_timesteps(ddim_steps)
-        latents = latents * self.noise_scheduler.init_noise_sigma
-
-        for i, t in enumerate(tqdm.tqdm(self.noise_scheduler.timesteps)):
-            latent_model_input = torch.cat([latents] * 2)
-            latent_model_input = self.noise_scheduler.scale_model_input(latent_model_input, timestep=t)
-
-            with network:
-                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, timestep_cond=None).sample
-
-            # guidance
-            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-            latents = self.noise_scheduler.step(noise_pred, t, latents).prev_sample
-
-        latents = 1 / 0.18215 * latents
-        image = self.vae.decode(latents.float()).sample
-        image = (image / 2 + 0.5).clamp(0, 1)
-        image = image.detach().cpu().float().permute(0, 2, 3, 1).numpy()[0]
-
-        image = Image.fromarray((image * 255).round().astype("uint8"))
-
-        return image
-
-    @torch.no_grad()
-    @spaces.GPU(duration=120)
-    def edit_inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
-        print("start")
-        device = self.device
-        self.unet.to(device)
-        self.text_encoder.to(device)
-        self.vae.to(device)
-        self.mean.to(device)
-        self.std.to(device)
-        self.v.to(device)
-        self.proj.to(device)
-        self.weights = torch.load("model.pt").to(device)
-        self.young.to(device)
-        self.pointy.to(device)
-        self.wavy.to(device)
-        self.thick.to(device)
-
-        network = LoRAw2w(self.weights.bfloat16(), self.mean.bfloat16(), self.std.bfloat16(), self.v[:, :1000].bfloat16(),
-                          self.unet,
-                          rank=1,
-                          multiplier=1.0,
-                          alpha=27.0,
-                          train_method="xattn-strict"
-                          ).to(device, torch.bfloat16)
-
-        original_weights = self.weights.clone()
-
-        # pad to same number of PCs
-        pcs_original = original_weights.shape[1]
-        pcs_edits = self.young.shape[1]
-        padding = torch.zeros((1, pcs_original - pcs_edits)).to(device)
-        young_pad = torch.cat((self.young, padding), 1)
-        pointy_pad = torch.cat((self.pointy, padding), 1)
-        wavy_pad = torch.cat((self.wavy, padding), 1)
-        thick_pad = torch.cat((self.thick, padding), 1)
-
-        edited_weights = original_weights + a1*1e6*young_pad + a2*1e6*pointy_pad + a3*1e6*wavy_pad + a4*2e6*thick_pad
-
-        generator = torch.Generator(device=device).manual_seed(seed)
-        latents = torch.randn(
-            (1, self.unet.in_channels, 512 // 8, 512 // 8),
-            generator=generator,
-            device=self.device
-        ).bfloat16()
-
-        text_input = self.tokenizer(prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
-        text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
-
-        max_length = text_input.input_ids.shape[-1]
-        uncond_input = self.tokenizer(
-            [negative_prompt], padding="max_length", max_length=max_length, return_tensors="pt"
-        )
-        uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0]
-        text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).bfloat16()
-        self.noise_scheduler.set_timesteps(ddim_steps)
-        latents = latents * self.noise_scheduler.init_noise_sigma
-
-        for i, t in enumerate(tqdm.tqdm(self.noise_scheduler.timesteps)):
-            latent_model_input = torch.cat([latents] * 2)
-            latent_model_input = self.noise_scheduler.scale_model_input(latent_model_input, timestep=t)
-
-            if t > start_noise:
-                pass
-            elif t <= start_noise:
-                network.proj = torch.nn.Parameter(edited_weights)
-                network.reset()
-
-            with network:
-                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, timestep_cond=None).sample
-
-            # guidance
-            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-            latents = self.noise_scheduler.step(noise_pred, t, latents).prev_sample
-
-        latents = 1 / 0.18215 * latents
-        image = self.vae.decode(latents.float()).sample
-        image = (image / 2 + 0.5).clamp(0, 1)
-        image = image.detach().cpu().float().permute(0, 2, 3, 1).numpy()[0]
-
-        image = Image.fromarray((image * 255).round().astype("uint8"))
-
-        return image
-
-    @torch.no_grad()
-    @spaces.GPU(duration=120)
-    def sample_then_run(self):
-        self.unet = UNet2DConditionModel.from_pretrained(
-            "stablediffusionapi/realistic-vision-v51", subfolder="unet", revision=None
-        )
-        self.unet.to(self.device, dtype=torch.bfloat16)
-        self.weights = sample_weights(self.unet, self.proj, self.mean, self.std, self.v[:, :1000], self.device, factor=1.00)
-
-        prompt = "sks person"
-        negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
-        seed = 5
-        cfg = 3.0
-        steps = 25
-        image = self.inference(prompt, negative_prompt, cfg, steps, seed)
-        torch.save(self.weights.cpu().detach(), "model.pt")
-        return image, "model.pt"
-
-class CustomImageDataset(Dataset):
-    def __init__(self, images, transform=None):
-        self.images = images
-        self.transform = transform
-
-    def __len__(self):
-        return len(self.images)
-
-    def __getitem__(self, idx):
-        image = self.images[idx]
-        if self.transform:
-            image = self.transform(image)
-        return image
-
-@spaces.GPU
-def invert(self, image, mask, pcs=10000, epochs=400, weight_decay=1e-10, lr=1e-1):
-
-    del unet
-    del network
-    unet, _, _, _, _ = load_models(device)
-
-    proj = torch.zeros(1, pcs).bfloat16().to(device)
-    network = LoRAw2w(proj, mean, std, v[:, :pcs],
-                      unet,
-                      rank=1,
-                      multiplier=1.0,
-                      alpha=27.0,
-                      train_method="xattn-strict"
-                      ).to(device, torch.bfloat16)
-
-    ### load mask
-    mask = transforms.Resize((64, 64), interpolation=transforms.InterpolationMode.BILINEAR)(mask)
-    mask = torchvision.transforms.functional.pil_to_tensor(mask).unsqueeze(0).to(device).bfloat16()[:, 0, :, :].unsqueeze(1)
-    ### check if an actual mask was drawn, otherwise the mask is just all ones
-    if torch.sum(mask) == 0:
-        mask = torch.ones((1, 1, 64, 64)).to(device).bfloat16()
-
-    ### single image dataset
-    image_transforms = transforms.Compose([transforms.Resize(512, interpolation=transforms.InterpolationMode.BILINEAR),
-                                           transforms.RandomCrop(512),
-                                           transforms.ToTensor(),
-                                           transforms.Normalize([0.5], [0.5])])
-
-    train_dataset = CustomImageDataset(image, transform=image_transforms)
-    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True)
-
-    ### optimizer
-    optim = torch.optim.Adam(network.parameters(), lr=lr, weight_decay=weight_decay)
-
-    ### training loop
-    unet.train()
-    for epoch in tqdm.tqdm(range(epochs)):
-        for batch in train_dataloader:
-            ### prepare inputs
-            batch = batch.to(device).bfloat16()
-            latents = vae.encode(batch).latent_dist.sample()
-            latents = latents * 0.18215
-            noise = torch.randn_like(latents)
-            bsz = latents.shape[0]
-
-            timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
-            timesteps = timesteps.long()
-            noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-            text_input = tokenizer("sks person", padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
-            text_embeddings = text_encoder(text_input.input_ids.to(device))[0]
-
-            ### loss + sgd step
-            with network:
-                model_pred = unet(noisy_latents, timesteps, text_embeddings).sample
-                loss = torch.nn.functional.mse_loss(mask*model_pred.float(), mask*noise.float(), reduction="mean")
-                optim.zero_grad()
-                loss.backward()
-                optim.step()
-
-    ### return optimized network
-    return network
-
-    @spaces.GPU
-    def run_inversion(self, dict, pcs, epochs, weight_decay, lr):
-        init_image = dict["image"].convert("RGB").resize((512, 512))
-        mask = dict["mask"].convert("RGB").resize((512, 512))
-        network = invert([init_image], mask, pcs, epochs, weight_decay, lr)
-
-        # sample an image
-        prompt = "sks person"
-        negative_prompt = "low quality, blurry, unfinished, nudity"
-        seed = 5
-        cfg = 3.0
-        steps = 25
-        image = inference(prompt, negative_prompt, cfg, steps, seed)
-        torch.save(network.proj, "model.pt")
-        return image, "model.pt"
-
-    @spaces.GPU
-    def file_upload(self, file):
-
-        proj = torch.load(file.name).to(device)
-
-        # pad to 10000 principal components to keep everything consistent
-        pcs = proj.shape[1]
-        padding = torch.zeros((1, 10000 - pcs)).to(device)
-        proj = torch.cat((proj, padding), 1)
-
-        unet, _, _, _, _ = load_models(device)
-
-        network = LoRAw2w(proj, mean, std, v[:, :10000],
-                          unet,
-                          rank=1,
-                          multiplier=1.0,
-                          alpha=27.0,
-                          train_method="xattn-strict"
-                          ).to(device, torch.bfloat16)
-
-        prompt = "sks person"
-        negative_prompt = "low quality, blurry, unfinished, nudity"
-        seed = 5
-        cfg = 3.0
-        steps = 25
-        image = inference(prompt, negative_prompt, cfg, steps, seed)
-        return image
-
-intro = """
-<div style="display: flex;align-items: center;justify-content: center">
-    <h1 style="margin-left: 12px;text-align: center;margin-bottom: 7px;display: inline-block"><em>weights2weights</em> Demo</h1>
-    <h3 style="display: inline-block;margin-left: 10px;margin-top: 6px;font-weight: 500">Interpreting the Weight Space of Customized Diffusion Models</h3>
-</div>
-<p style="font-size: 0.95rem;margin: 0rem;line-height: 1.2em;margin-top:1em;display: inline-block">
-    <a href="https://snap-research.github.io/weights2weights/" target="_blank">Project Page</a> | <a href="https://arxiv.org/abs/2406.09413" target="_blank">Paper</a>
-    | <a href="https://github.com/snap-research/weights2weights" target="_blank">Code</a> |
-    <a href="https://huggingface.co/spaces/Snapchat/w2w-demo?duplicate=true" target="_blank" style="display: inline-block;">
-    <img style="margin-top: -1em;margin-bottom: 0em;position: absolute;" src="https://bit.ly/3CWLGkA" alt="Duplicate Space"></a>
-</p>
-"""
+device="cuda"
+mean = torch.load(f"{models_path}/files/mean.pt", map_location=torch.device('cpu')).bfloat16().to(device)
+std = torch.load(f"{models_path}/files/std.pt", map_location=torch.device('cpu')).bfloat16().to(device)
+v = torch.load(f"{models_path}/files/V.pt", map_location=torch.device('cpu')).bfloat16().to(device)
+proj = torch.load(f"{models_path}/files/proj_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
+df = torch.load(f"{models_path}/files/identity_df.pt")
+weight_dimensions = torch.load(f"{models_path}/files/weight_dimensions.pt")
+pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
+
+unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)
+
+@spaces.GPU
+def sample_then_run():
+    # get mean and standard deviation for each principal component
+    m = torch.mean(proj, 0)
+    standev = torch.std(proj, 0)
+
+    # sample
+    sample = torch.zeros([1, 1000]).to(device)
+    for i in range(1000):
+        sample[0, i] = torch.normal(m[i], factor*standev[i], (1, 1))
+
+    net = "model_"+str(uuid.uuid4())[:4]+".pt"
+
+    return net
+
  with gr.Blocks(css="style.css") as demo:
504
- model = main()
505
- gr.HTML(intro)
506
-
507
- gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")
508
  with gr.Column():
509
  with gr.Row():
510
  with gr.Column():
511
- gr.Markdown("""1) Either sample a new model, or upload an image (optionally draw a mask over the head) and click `invert`.""")
512
  sample = gr.Button("🎲 Sample New Model")
513
- input_image = gr.ImageEditor(elem_id="image_upload", type='pil', label="Reference Identity",
514
- width=512, height=512)
515
-
516
- with gr.Row():
517
- invert_button = gr.Button("⬆️ Invert")
518
-
519
-
520
-
521
- with gr.Column():
522
- gr.Markdown("""2) Generate images of the sampled/inverted identity or edit the identity with the sliders and generate new images with various prompts and seeds.""")
523
- gallery = gr.Image(label="Generated Image",height=512, width=512, interactive=False)
524
- submit = gr.Button("Generate")
525
-
526
-
527
- prompt = gr.Textbox(label="Prompt",
528
- info="Make sure to include 'sks person'" ,
529
- placeholder="sks person",
530
- value="sks person")
531
-
532
- seed = gr.Number(value=5, label="Seed", precision=0, interactive=True)
533
 
534
- # Editing
535
- with gr.Column():
536
- with gr.Row():
537
- a1 = gr.Slider(label="- Young +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
538
- a2 = gr.Slider(label="- Pointy Nose +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
539
- with gr.Row():
540
- a3 = gr.Slider(label="- Curly Hair +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
541
- a4 = gr.Slider(label="- Thick Eyebrows +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
542
-
543
-
544
- with gr.Accordion("Advanced Options", open=False):
545
- with gr.Tab("Inversion"):
546
- with gr.Row():
547
- lr = gr.Number(value=1e-1, label="Learning Rate", interactive=True)
548
- pcs = gr.Slider(label="# Principal Components", value=10000, step=1, minimum=1, maximum=10000, interactive=True)
549
- with gr.Row():
550
- epochs = gr.Slider(label="Epochs", value=800, step=1, minimum=1, maximum=2000, interactive=True)
551
- weight_decay = gr.Number(value=1e-10, label="Weight Decay", interactive=True)
552
- with gr.Tab("Sampling"):
553
- with gr.Row():
554
- cfg= gr.Slider(label="CFG", value=3.0, step=0.1, minimum=0, maximum=10, interactive=True)
555
- steps = gr.Slider(label="Inference Steps", value=25, step=1, minimum=0, maximum=100, interactive=True)
556
- with gr.Row():
557
- negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="low quality, blurry, unfinished, nudity, weapon", value="low quality, blurry, unfinished, nudity, weapon")
558
- injection_step = gr.Slider(label="Injection Step", value=800, step=1, minimum=0, maximum=1000, interactive=True)
559
-
560
- with gr.Tab("Uploading a model"):
561
- gr.Markdown("""<div style="text-align: justify;">Upload a model below downloaded from this demo.""")
562
-
563
- file_input = gr.File(label="Upload Model", container=True)
564
-
565
-
566
-
567
-
568
- gr.Markdown("""<div style="text-align: justify;"> After sampling a new model or inverting, you can download the model below.""")
569
-
570
- with gr.Row():
571
- file_output = gr.File(label="Download Sampled/Inverted Model", container=True, interactive=False)
572
-
573
-
574
-
575
-
576
- invert_button.click(fn=model.run_inversion,
577
- inputs=[input_image, pcs, epochs, weight_decay,lr],
578
- outputs = [input_image, file_output])
579
-
580
 
581
- sample.click(fn=model.sample_then_run, outputs=[input_image, file_output])
582
 
583
- submit.click(
584
- fn=model.edit_inference, inputs=[prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[gallery]
585
- )
586
- file_input.change(fn=model.file_upload, inputs=file_input, outputs = gallery)
587
-
588
 
589
 
590
- help_text1 = """
591
- <b>Instructions</b>:
592
- 1. To get results faster without waiting in queue, you can duplicate into a private space with an A100 GPU.
593
- 2. To begin, you will have to get an identity-encoding model. You can either sample one from *weights2weights* space by clicking `Sample New Model` or by uploading an image and clicking `invert` to invert the identity into a model. You can optionally draw over the head to define a mask in the image for better results. Sampling a model takes around 10 seconds and inversion takes around 2 minutes. After this is done, you can optionally download this model for later use. A model can be uploaded in the \"Uploading a model\" tab in the `Advanced Options`.
594
- 3. After getting a model, an image of the identity will be displayed on the right. You can sample from the model by changing seeds as well as prompts and then clicking `Generate`. Make sure to include \"sks person\" in your prompt to keep the same identity.
595
- 4. The identity in the model can be edited by changing the sliders for various attributes. After clicking `Generate`, you can see how the identity has changed and the effects are maintained across different seeds and prompts.
596
- """
597
- help_text2 = """<b>Tips</b>:
598
- 1. Editing and Identity Generation
599
- * If you are interested in preserving more of the image during identity-editing (i.e., where the same seed and prompt results in the same image with only the identity changed), you can play with the "Injection Step" parameter in the \"Sampling\" tab in the `Advanced Options`. During the first *n* timesteps, the original model's weights will be used, and then the edited weights will be set during the remaining steps. Values closer to 1000 will set the edited weights early, having a more pronounced effect, which may disrupt some semantics and structure of the generated image. Lower values will set the edited weights later, better preserving image context. We notice that around 600-800 tends to produce the best results. Larger values in the range (700-1000) are helpful for more global attribute changes, while smaller (400-700) can be used for more finegrained edits. Although it is not always needed.
600
- * You can play around with negative prompts, number of inference steps, and CFG in the \"Sampling\" tab in the `Advanced Options` to affect the ultimate image quality.
601
- * Sometimes the identity will not be perfectly consistent (e.g., there might be small variations of the face) when you use some seeds or prompts. This is a limitation of our method as well as an open-problem in personalized models.
602
- 2. Inversion
603
- * To obtain the best results for inversion, upload a high resolution photo of the face with minimal occlusion. It is recommended to draw over the face and hair to define a mask. But inversion should still work generally for non-closeup face shots.
604
- * For inverting a realistic photo of an identity, typically 800 epochs with lr=1e-1 and 10,000 principal components (PCs) works well. If the resulting generations have artifacted and unrealstic textures, there is probably overfitting and you may want to reduce the number of epochs or learning rate, or play with weight decay. If the generations do not look like the input photo, then you may want to increase the number of epochs.
605
- * For inverting out-of-distribution identities, such as artistic renditions of people or non-humans (e.g. the ones shown in the paper), it is recommended to use 1000 PCs, lr=1, and train for 800 epochs.
606
- * Note that if you change the number of PCs, you will probably need to change the learning rate. For less PCs, higher learning rates are typically required."""
607
-
608
-
609
- gr.Markdown(help_text1)
610
- gr.Markdown(help_text2)
611
 
612
  demo.queue().launch()
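Reference sketches for the techniques this commit touches follow. First, both removed samplers (`inference` and `edit_inference`) run the same classifier-free guidance step inside their denoising loops. Below is a minimal standalone distillation of that step, assuming a diffusers UNet and scheduler are already loaded; it is a sketch, not the app's code verbatim.

```python
import torch

@torch.no_grad()
def cfg_denoise_step(unet, scheduler, latents, t, text_embeddings, guidance_scale):
    # text_embeddings stacks [unconditional, conditional] embeddings, so a
    # single UNet call evaluates both branches of classifier-free guidance.
    latent_model_input = torch.cat([latents] * 2)
    latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t)
    noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
    # Push the prediction away from the unconditional branch by guidance_scale.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    return scheduler.step(noise_pred, t, latents).prev_sample
```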
 
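The removed `edit_inference` method and the "Injection Step" tip implement delayed weight injection: the original identity weights run during the early, high-noise timesteps, and the edited weights are swapped in once the timestep falls below the threshold. A sketch of that control flow, assuming the repo's `LoRAw2w` network with its `proj` parameter and `reset()` method:

```python
import torch

def maybe_inject_edited_weights(network, edited_weights, t, start_noise):
    # Scheduler timesteps count down from ~1000 toward 0. While t > start_noise
    # the network keeps the original identity weights; once t <= start_noise the
    # edited weights are injected for the remaining low-noise steps.
    if t <= start_noise:
        network.proj = torch.nn.Parameter(edited_weights)
        network.reset()
```

Higher thresholds inject the edit earlier and change the image more globally; lower thresholds preserve more of the original layout, matching the guidance in the removed help text.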
 
 
 
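Both `edit_inference` and `file_upload` zero-pad PCA coefficient vectors so tensors expressed in different numbers of principal components can be combined. The idea in isolation, as a small sketch:

```python
import torch

def pad_to_pcs(coeffs: torch.Tensor, target_pcs: int) -> torch.Tensor:
    # coeffs has shape (1, pcs) with pcs <= target_pcs; append zeros so it can
    # be added to weights expressed in target_pcs principal components.
    padding = torch.zeros((1, target_pcs - coeffs.shape[1]), device=coeffs.device)
    return torch.cat((coeffs, padding), dim=1)
```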
 
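The removed `invert` method fits the LoRA by denoising-score matching restricted to the user-drawn mask. A condensed sketch of one optimization step under the same assumptions (models from `load_models`, the repo's `LoRAw2w` used as a context manager that activates the LoRA inside the UNet):

```python
import torch

def inversion_step(unet, vae, network, noise_scheduler, batch, text_embeddings, mask, optim):
    # Encode the training image, add noise at a random timestep, and penalize
    # the noise-prediction error only inside the mask.
    latents = vae.encode(batch).latent_dist.sample() * 0.18215
    noise = torch.randn_like(latents)
    t = torch.randint(0, noise_scheduler.config.num_train_timesteps,
                      (latents.shape[0],), device=latents.device).long()
    noisy_latents = noise_scheduler.add_noise(latents, noise, t)
    with network:  # LoRA weights active for this forward/backward pass
        model_pred = unet(noisy_latents, t, text_embeddings).sample
    loss = torch.nn.functional.mse_loss(mask * model_pred.float(),
                                        mask * noise.float(), reduction="mean")
    optim.zero_grad()
    loss.backward()
    optim.step()
    return loss.item()
```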
 
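Finally, the new `sample_then_run` draws each principal-component coefficient from an independent Gaussian fitted over the stored identity projections. Note that `factor` is undefined in the committed snippet; the sketch below assumes `factor=1.0`, matching the `factor=1.00` passed to the removed `sample_weights` call, and vectorizes the per-index loop:

```python
import torch

def sample_pc_coefficients(proj: torch.Tensor, factor: float = 1.0) -> torch.Tensor:
    # proj: (num_models, num_pcs) PCA coefficients of the known identity models.
    m = proj.mean(dim=0)        # per-component mean
    standev = proj.std(dim=0)   # per-component standard deviation
    # One draw from N(m_i, (factor * standev_i)^2) for every component i.
    return (m + factor * standev * torch.randn_like(m)).unsqueeze(0)
```

The result has shape (1, num_pcs), matching the `sample` tensor the committed loop builds one index at a time.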
 