Spaces: Running on Zero

Update app.py
app.py CHANGED
@@ -112,23 +112,43 @@ thick = debias(thick, "Black_Hair", df, pinverse, device)
 thick = debias(thick, "Brown_Hair", df, pinverse, device)
 thick = debias(thick, "Pale_Skin", df, pinverse, device)
 thick = debias(thick, "Heavy_Makeup", df, pinverse, device)
-
+
+

 @torch.no_grad()
-@spaces.GPU
-def
-
-
-
-
-
-
-
-
-
+@spaces.GPU
+def sample_then_run(net):
+    print(net)
+    # get mean and standard deviation for each principal component
+    m = torch.mean(proj, 0)
+    standev = torch.std(proj, 0)
+
+    # sample
+    sample = torch.zeros([1, 1000]).to(device)
+    for i in range(1000):
+        sample[0, i] = torch.normal(m[i], standev[i], (1,1))

-
-
+    net = "model_"+str(uuid.uuid4())[:4]+".pt"
+    torch.save(sample, net)
+
+    image = prompt = "sks person"
+    negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
+    seed = 5
+    cfg = 3.0
+    steps = 25
+    image = inference(net, prompt, negative_prompt, cfg, steps, seed)
+    return net, image
+
+@torch.no_grad()
+@spaces.GPU(duration=120)
+def inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
+    mean.to(device)
+    std.to(device)
+    v.to(device)
+
+    weights = torch.load(net).to(device)
+    network = LoRAw2w(weights, mean, std, v[:, :1000],
+                      unet,
                       rank=1,
                       multiplier=1.0,
                       alpha=27.0,
@@ -176,6 +196,8 @@ def inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
     image = image.detach().cpu().float().permute(0, 2, 3, 1).numpy()[0]

     image = Image.fromarray((image * 255).round().astype("uint8"))
+
+    del network

     return image

@@ -270,23 +292,23 @@ def edit_inference(self, prompt, negative_prompt, guidance_scale, ddim_steps, se

     return image

-@torch.no_grad()
-@spaces.GPU(duration=120)
-def sample_then_run(self):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# @torch.no_grad()
+# @spaces.GPU(duration=120)
+# def sample_then_run(self):
+#     self.unet = UNet2DConditionModel.from_pretrained(
+#         "stablediffusionapi/realistic-vision-v51" , subfolder="unet", revision=None
+#     )
+#     self.unet.to(self.device, dtype=torch.bfloat16)
+#     self.weights = sample_weights(self.unet, self.proj, self.mean, self.std, self.v[:, :1000], self.device, factor = 1.00)
+
+#     prompt = "sks person"
+#     negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
+#     seed = 5
+#     cfg = 3.0
+#     steps = 25
+#     image = self.inference(prompt, negative_prompt, cfg, steps, seed)
+#     torch.save(self.weights.cpu().detach(), "model.pt" )
+#     return image, "model.pt"



@@ -434,7 +456,7 @@ intro = """


 with gr.Blocks(css="style.css") as demo:
-
+    net = gr.State()
     gr.HTML(intro)

     gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")
@@ -511,7 +533,7 @@ with gr.Blocks(css="style.css") as demo:
     # outputs = [input_image, file_output])


-
+    sample.click(fn=sample_then_run,inputs = [net], outputs=[net, input_image])

     # submit.click(
     #     fn=model.edit_inference, inputs=[prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[gallery]
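For context, the pattern this commit moves to can be illustrated with a minimal, self-contained sketch (an illustration under stated assumptions, not the Space's actual code): the sampling handler becomes a module-level function, and a gr.State slot carries the path of the saved weight file between clicks, so the handler itself holds no per-user state. The proj tensor and the placeholder output image below are stand-ins; in the real app the handlers are decorated with @spaces.GPU and the image comes from the diffusion pipeline.

import uuid

import gradio as gr
import numpy as np
import torch

# Stand-in for the principal-component projections the real app loads from disk.
proj = torch.randn(100, 1000)


def sample_then_run(net):
    # Sample one coefficient per principal component from its empirical mean/std.
    m = torch.mean(proj, 0)
    standev = torch.std(proj, 0)
    sample = m + standev * torch.randn(1000)

    # Save the sample under a fresh name; the path travels back through gr.State.
    net = "model_" + str(uuid.uuid4())[:4] + ".pt"
    torch.save(sample, net)

    # Placeholder image; the real handler runs inference with the sampled weights.
    image = np.zeros((64, 64, 3), dtype=np.uint8)
    return net, image


with gr.Blocks() as demo:
    net = gr.State()  # per-session slot for the current model path
    input_image = gr.Image(label="sampled identity")
    sample_btn = gr.Button("Sample new model")
    sample_btn.click(fn=sample_then_run, inputs=[net], outputs=[net, input_image])

if __name__ == "__main__":
    demo.launch()

Because the gr.State slot is both an input and an output of the click handler, the sampled model path persists across later calls (for example, the inference handler) without being stored on a shared object, which is what lets the GPU-decorated functions run as plain stateless functions on a ZeroGPU Space.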