amildravid4292 committed
Commit 7dcf34d · verified · 1 Parent(s): 83c3645

Update app.py

Files changed (1): app.py (+55 -33)
app.py CHANGED
@@ -112,23 +112,43 @@ thick = debias(thick, "Black_Hair", df, pinverse, device)
 thick = debias(thick, "Brown_Hair", df, pinverse, device)
 thick = debias(thick, "Pale_Skin", df, pinverse, device)
 thick = debias(thick, "Heavy_Makeup", df, pinverse, device)
-
+
+
 
 @torch.no_grad()
-@spaces.GPU(duration=120)
-def inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
-    device = self.device
-    self.unet.to(device)
-    self.text_encoder.to(device)
-    self.vae.to(device)
-    self.mean.to(device)
-    self.std.to(device)
-    self.v.to(device)
-    self.proj.to(device)
-    self.weights.to(device)
+@spaces.GPU
+def sample_then_run(net):
+    print(net)
+    # get mean and standard deviation for each principal component
+    m = torch.mean(proj, 0)
+    standev = torch.std(proj, 0)
+
+    # sample
+    sample = torch.zeros([1, 1000]).to(device)
+    for i in range(1000):
+        sample[0, i] = torch.normal(m[i], standev[i], (1, 1))
 
-    network = LoRAw2w(self.weights.bfloat16(), self.mean.bfloat16(), self.std.bfloat16(), self.v[:, :1000].bfloat16(),
-                      self.unet,
+    net = "model_" + str(uuid.uuid4())[:4] + ".pt"
+    torch.save(sample, net)
+
+    prompt = "sks person"
+    negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
+    seed = 5
+    cfg = 3.0
+    steps = 25
+    image = inference(net, prompt, negative_prompt, cfg, steps, seed)
+    return net, image
+
+@torch.no_grad()
+@spaces.GPU(duration=120)
+def inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
+    mean.to(device)
+    std.to(device)
+    v.to(device)
+
+    weights = torch.load(net).to(device)
+    network = LoRAw2w(weights, mean, std, v[:, :1000],
+                      unet,
                       rank=1,
                       multiplier=1.0,
                       alpha=27.0,
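
Note: the new `sample_then_run` draws each of the 1,000 principal-component coefficients from an independent Gaussian fitted to the columns of `proj`. A minimal sketch of that sampling step, assuming `proj` is an (N, 1000) tensor of per-identity PCA coefficients; the helper name is illustrative, not part of the commit:

    import torch

    def sample_pca_coefficients(proj: torch.Tensor) -> torch.Tensor:
        # Fit an independent Gaussian to each principal component.
        m = proj.mean(dim=0)   # per-component means, shape (1000,)
        s = proj.std(dim=0)    # per-component standard deviations, shape (1000,)
        # torch.normal broadcasts elementwise over tensor mean/std, so the
        # per-index loop in the commit collapses to a single call.
        return torch.normal(m, s).unsqueeze(0)  # shape (1, 1000)

The loop in the commit samples the same distribution; the vectorized form just avoids 1,000 separate `torch.normal` calls.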
@@ -176,6 +196,8 @@ def inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
     image = image.detach().cpu().float().permute(0, 2, 3, 1).numpy()[0]
 
     image = Image.fromarray((image * 255).round().astype("uint8"))
+
+    del network
 
     return image
 
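
Note: the added `del network` drops the last reference to the LoRAw2w wrapper before `inference` returns, letting its tensors be garbage-collected between `@spaces.GPU` calls. A hedged sketch of a slightly more thorough cleanup helper; this is hypothetical, the commit relies on `del` alone, and the `empty_cache` step is an assumption, not part of the change:

    import gc
    import torch

    def release_cuda_memory() -> None:
        # Collect objects whose last reference was dropped via `del`,
        # then return cached allocator blocks to the driver.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

Whether the extra steps matter depends on the Space's allocator behavior; calling such a helper right after `del network` would be the natural spot.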
@@ -270,23 +292,23 @@ def edit_inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, se
 
     return image
 
-@torch.no_grad()
-@spaces.GPU(duration=120)
-def sample_then_run(self):
-    self.unet = UNet2DConditionModel.from_pretrained(
-        "stablediffusionapi/realistic-vision-v51", subfolder="unet", revision=None
-    )
-    self.unet.to(self.device, dtype=torch.bfloat16)
-    self.weights = sample_weights(self.unet, self.proj, self.mean, self.std, self.v[:, :1000], self.device, factor=1.00)
-
-    prompt = "sks person"
-    negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
-    seed = 5
-    cfg = 3.0
-    steps = 25
-    image = self.inference(prompt, negative_prompt, cfg, steps, seed)
-    torch.save(self.weights.cpu().detach(), "model.pt")
-    return image, "model.pt"
+# @torch.no_grad()
+# @spaces.GPU(duration=120)
+# def sample_then_run(self):
+#     self.unet = UNet2DConditionModel.from_pretrained(
+#         "stablediffusionapi/realistic-vision-v51", subfolder="unet", revision=None
+#     )
+#     self.unet.to(self.device, dtype=torch.bfloat16)
+#     self.weights = sample_weights(self.unet, self.proj, self.mean, self.std, self.v[:, :1000], self.device, factor=1.00)
+
+#     prompt = "sks person"
+#     negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
+#     seed = 5
+#     cfg = 3.0
+#     steps = 25
+#     image = self.inference(prompt, negative_prompt, cfg, steps, seed)
+#     torch.save(self.weights.cpu().detach(), "model.pt")
+#     return image, "model.pt"
 
 
 
@@ -434,7 +456,7 @@ intro = """
 
 
 with gr.Blocks(css="style.css") as demo:
-
+    net = gr.State()
     gr.HTML(intro)
 
     gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")
@@ -511,7 +533,7 @@ with gr.Blocks(css="style.css") as demo:
     # outputs = [input_image, file_output])
 
 
-    # sample.click(fn=model.sample_then_run, outputs=[input_image, file_output])
+    sample.click(fn=sample_then_run, inputs=[net], outputs=[net, input_image])
 
     # submit.click(
    # fn=model.edit_inference, inputs=[prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[gallery]
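
Note: together with `net = gr.State()` at the top of the Blocks, the re-enabled `sample.click` forms a round trip: the click reads the current state into `sample_then_run(net)`, and the returned `(net, image)` pair updates both the state and the image component. A self-contained sketch of the same pattern, with illustrative names in place of the app's real components:

    import gradio as gr

    def sample_then_run_stub(net):
        # Stand-in for the real sampler: produce a fresh checkpoint path.
        new_net = "model_demo.pt"
        return new_net, None  # (updated state, image placeholder)

    with gr.Blocks() as demo:
        net = gr.State()          # persists the checkpoint path per session
        input_image = gr.Image()
        sample = gr.Button("Sample New Model")
        # gr.State may appear in both inputs and outputs, so each click can
        # read the previous path and store the new one.
        sample.click(fn=sample_then_run_stub, inputs=[net], outputs=[net, input_image])

    demo.launch()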
 