amildravid4292 committed on
Commit
a841325
·
verified ·
1 Parent(s): 8c3366f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -37
app.py CHANGED
@@ -23,8 +23,8 @@ import spaces
23
 
24
 
25
  models_path = snapshot_download(repo_id="Snapchat/w2w")
26
- device = "cuda"
27
 
 
28
  mean = torch.load(f"{models_path}/files/mean.pt", map_location=torch.device('cpu')).bfloat16().to(device)
29
  std = torch.load(f"{models_path}/files/std.pt", map_location=torch.device('cpu')).bfloat16().to(device)
30
  v = torch.load(f"{models_path}/files/V.pt", map_location=torch.device('cpu')).bfloat16().to(device)
@@ -34,8 +34,7 @@ weight_dimensions = torch.load(f"{models_path}/files/weight_dimensions.pt")
34
  pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
35
 
36
 
37
-
38
-
39
 
40
 
41
  young = get_direction(df, "Young", pinverse, 1000, device)
@@ -79,10 +78,9 @@ thick = debias(thick, "Brown_Hair", df, pinverse, device)
79
  thick = debias(thick, "Pale_Skin", df, pinverse, device)
80
  thick = debias(thick, "Heavy_Makeup", df, pinverse, device)
81
 
82
- def sample_model():
83
- global unet
84
  del unet
85
- global network
86
  mean.to(device)
87
  std.to(device)
88
  v.to(device)
@@ -93,13 +91,6 @@ def sample_model():
93
  @torch.no_grad()
94
  @spaces.GPU
95
  def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
96
- global device
97
- #global generator
98
- global unet
99
- global vae
100
- global text_encoder
101
- global tokenizer
102
- global noise_scheduler
103
  generator = torch.Generator(device=device).manual_seed(seed)
104
  latents = torch.randn(
105
  (1, unet.in_channels, 512 // 8, 512 // 8),
@@ -396,29 +387,6 @@ intro = """
396
 
397
 
398
  with gr.Blocks(css="style.css") as demo:
399
- generator = gr.State()
400
- unet = gr.State()
401
- vae = gr.State()
402
- text_encoder = gr.State()
403
- tokenizer = gr.State()
404
- noise_scheduler = gr.State()
405
- network = gr.State()
406
- #device = gr.State(torch.device("cuda"))
407
- device = "cuda"
408
- #generator = torch.Generator(device=device)
409
- young = gr.State()
410
- pointy = gr.State()
411
- wavy = gr.State()
412
- thick = gr.State()
413
- unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)
414
-
415
-
416
-
417
-
418
-
419
-
420
-
421
-
422
  gr.HTML(intro)
423
 
424
  gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")
@@ -495,7 +463,7 @@ with gr.Blocks(css="style.css") as demo:
495
  outputs = [input_image, file_output])
496
 
497
 
498
- sample.click(fn=sample_then_run, outputs=[input_image, file_output])
499
 
500
  submit.click(
501
  fn=edit_inference, inputs=[prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[gallery]
 
23
 
24
 
25
  models_path = snapshot_download(repo_id="Snapchat/w2w")
 
26
 
27
+ device = "cuda"
28
  mean = torch.load(f"{models_path}/files/mean.pt", map_location=torch.device('cpu')).bfloat16().to(device)
29
  std = torch.load(f"{models_path}/files/std.pt", map_location=torch.device('cpu')).bfloat16().to(device)
30
  v = torch.load(f"{models_path}/files/V.pt", map_location=torch.device('cpu')).bfloat16().to(device)
 
34
  pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
35
 
36
 
37
+ unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)
 
38
 
39
 
40
  young = get_direction(df, "Young", pinverse, 1000, device)
 
78
  thick = debias(thick, "Pale_Skin", df, pinverse, device)
79
  thick = debias(thick, "Heavy_Makeup", df, pinverse, device)
80
 
81
+ def sample_model(unet, network):
 
82
  del unet
83
+ del network
84
  mean.to(device)
85
  std.to(device)
86
  v.to(device)
 
91
  @torch.no_grad()
92
  @spaces.GPU
93
  def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
 
 
 
 
 
 
 
94
  generator = torch.Generator(device=device).manual_seed(seed)
95
  latents = torch.randn(
96
  (1, unet.in_channels, 512 // 8, 512 // 8),
 
387
 
388
 
389
  with gr.Blocks(css="style.css") as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
390
  gr.HTML(intro)
391
 
392
  gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")
 
463
  outputs = [input_image, file_output])
464
 
465
 
466
+ sample.click(fn=sample_then_run, inputs=[unet, network], outputs=[input_image, file_output])
467
 
468
  submit.click(
469
  fn=edit_inference, inputs=[prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[gallery]