Update app.py
app.py
CHANGED
@@ -18,6 +18,8 @@ from PIL import Image
 import subprocess
 
 import torch
+import torch.multiprocessing as mp
+mp.set_start_method('spawn', force=True)
 import gradio as gr
 import string
 import random, time, math
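
The new `mp.set_start_method('spawn', force=True)` call switches multiprocessing to the `spawn` start method before any worker is created, which is generally required when child processes need their own CUDA context (a forked child inherits CUDA state it cannot reuse). Below is a minimal sketch of that pattern, independent of app.py; the `run_in_worker` helper and the worker body are illustrative assumptions, not code from this repository:

import torch
import torch.multiprocessing as mp

def _worker(queue):
    # The spawned child initializes CUDA from scratch instead of
    # inheriting a forked (and unusable) context from the parent.
    queue.put(torch.cuda.is_available())

def run_in_worker():
    # Illustrative helper (not part of app.py). With the default "fork"
    # start method on Linux, touching CUDA in a child after the parent
    # has initialized it typically fails with
    # "Cannot re-initialize CUDA in forked subprocess".
    mp.set_start_method("spawn", force=True)
    queue = mp.Queue()
    proc = mp.Process(target=_worker, args=(queue,))
    proc.start()
    proc.join()
    return queue.get()

if __name__ == "__main__":
    print(run_in_worker())
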
@@ -441,86 +443,88 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             prompt = gr.Textbox(label="Prompt", value="")
-            with gr.Row():
-                target_height = gr.Slider(512, 1024, step=128, value=768, label="Generated Height", info="")
-                target_width = gr.Slider(512, 1024, step=128, value=768, label="Generated Width", info="")
-                cond_size = gr.Slider(256, 384, step=128, value=256, label="Condition Size", info="")
-            with gr.Row():
-                weight_id = gr.Slider(0.1, 5, step=0.1, value=3, label="weight_id")
-                weight_ip = gr.Slider(0.1, 5, step=0.1, value=5, label="weight_ip")
-            with gr.Row():
-                ip_scale_str = gr.Slider(0.5, 1.5, step=0.01, value=0.85, label="latent_lora_scale")
-                vae_lora_scale = gr.Slider(0.5, 1.5, step=0.01, value=1.3, label="vae_lora_scale")
-            with gr.Row():
-                vae_skip_iter_s1 = gr.Slider(0, 1, step=0.01, value=0.05, label="vae_skip_iter_before")
-                vae_skip_iter_s2 = gr.Slider(0, 1, step=0.01, value=0.8, label="vae_skip_iter_after")
-
-
-            with gr.Row():
-                weight_id_ip_str = gr.Textbox(
-                    value="0-1:1/3/5",
-                    label="weight_id_ip_str",
-                    interactive=False, visible=False
-                )
-                weight_id.change(
-                    lambda s1, s2: f"0-1:1/{s1}/{s2}",
-                    inputs=[weight_id, weight_ip],
-                    outputs=weight_id_ip_str
-                )
-                weight_ip.change(
-                    lambda s1, s2: f"0-1:1/{s1}/{s2}",
-                    inputs=[weight_id, weight_ip],
-                    outputs=weight_id_ip_str
-                )
-                vae_skip_iter = gr.Textbox(
-                    value="0-0.05:1,0.8-1:1",
-                    label="vae_skip_iter",
-                    interactive=False, visible=False
-                )
-                vae_skip_iter_s1.change(
-                    lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
-                    inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
-                    outputs=vae_skip_iter
-                )
-                vae_skip_iter_s2.change(
-                    lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
-                    inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
-                    outputs=vae_skip_iter
-                )
-
-
-            with gr.Row():
-                db_latent_lora_scale_str = gr.Textbox(
-                    value="0-1:0.85",
-                    label="db_latent_lora_scale_str",
-                    interactive=False, visible=False
-                )
-                sb_latent_lora_scale_str = gr.Textbox(
-                    value="0-1:0.85",
-                    label="sb_latent_lora_scale_str",
-                    interactive=False, visible=False
-                )
-                vae_lora_scale_str = gr.Textbox(
-                    value="0-1:1.3",
-                    label="vae_lora_scale_str",
-                    interactive=False, visible=False
-                )
-                vae_lora_scale.change(
-                    lambda s: f"0-1:{s}",
-                    inputs=vae_lora_scale,
-                    outputs=vae_lora_scale_str
-                )
-                ip_scale_str.change(
-                    lambda s: [f"0-1:{s}", f"0-1:{s}"],
-                    inputs=ip_scale_str,
-                    outputs=[db_latent_lora_scale_str, sb_latent_lora_scale_str]
-                )
-
-            with gr.Row():
-                double_attention = gr.Checkbox(value=False, label="Double Attention", visible=False)
-                single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)
-
-            clear_btn = gr.Button("清空输入图像")
+            with gr.Tab("Tiger"):
+
+                with gr.Row():
+                    target_height = gr.Slider(512, 1024, step=128, value=768, label="Generated Height", info="")
+                    target_width = gr.Slider(512, 1024, step=128, value=768, label="Generated Width", info="")
+                    cond_size = gr.Slider(256, 384, step=128, value=256, label="Condition Size", info="")
+                with gr.Row():
+                    weight_id = gr.Slider(0.1, 5, step=0.1, value=3, label="weight_id")
+                    weight_ip = gr.Slider(0.1, 5, step=0.1, value=5, label="weight_ip")
+                with gr.Row():
+                    ip_scale_str = gr.Slider(0.5, 1.5, step=0.01, value=0.85, label="latent_lora_scale")
+                    vae_lora_scale = gr.Slider(0.5, 1.5, step=0.01, value=1.3, label="vae_lora_scale")
+                with gr.Row():
+                    vae_skip_iter_s1 = gr.Slider(0, 1, step=0.01, value=0.05, label="vae_skip_iter_before")
+                    vae_skip_iter_s2 = gr.Slider(0, 1, step=0.01, value=0.8, label="vae_skip_iter_after")
+
+
+                with gr.Row():
+                    weight_id_ip_str = gr.Textbox(
+                        value="0-1:1/3/5",
+                        label="weight_id_ip_str",
+                        interactive=False, visible=False
+                    )
+                    weight_id.change(
+                        lambda s1, s2: f"0-1:1/{s1}/{s2}",
+                        inputs=[weight_id, weight_ip],
+                        outputs=weight_id_ip_str
+                    )
+                    weight_ip.change(
+                        lambda s1, s2: f"0-1:1/{s1}/{s2}",
+                        inputs=[weight_id, weight_ip],
+                        outputs=weight_id_ip_str
+                    )
+                    vae_skip_iter = gr.Textbox(
+                        value="0-0.05:1,0.8-1:1",
+                        label="vae_skip_iter",
+                        interactive=False, visible=False
+                    )
+                    vae_skip_iter_s1.change(
+                        lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
+                        inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
+                        outputs=vae_skip_iter
+                    )
+                    vae_skip_iter_s2.change(
+                        lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
+                        inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
+                        outputs=vae_skip_iter
+                    )
+
+
+                with gr.Row():
+                    db_latent_lora_scale_str = gr.Textbox(
+                        value="0-1:0.85",
+                        label="db_latent_lora_scale_str",
+                        interactive=False, visible=False
+                    )
+                    sb_latent_lora_scale_str = gr.Textbox(
+                        value="0-1:0.85",
+                        label="sb_latent_lora_scale_str",
+                        interactive=False, visible=False
+                    )
+                    vae_lora_scale_str = gr.Textbox(
+                        value="0-1:1.3",
+                        label="vae_lora_scale_str",
+                        interactive=False, visible=False
+                    )
+                    vae_lora_scale.change(
+                        lambda s: f"0-1:{s}",
+                        inputs=vae_lora_scale,
+                        outputs=vae_lora_scale_str
+                    )
+                    ip_scale_str.change(
+                        lambda s: [f"0-1:{s}", f"0-1:{s}"],
+                        inputs=ip_scale_str,
+                        outputs=[db_latent_lora_scale_str, sb_latent_lora_scale_str]
+                    )
+
+                with gr.Row():
+                    double_attention = gr.Checkbox(value=False, label="Double Attention", visible=False)
+                    single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)
+
+                clear_btn = gr.Button("清空输入图像")
             with gr.Row():
                 for i in range(num_inputs):
                     image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox = create_image_input(i, open=i<2, indexs_state=indexs_state)
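
The hidden textboxes wired up in this hunk carry schedule strings of the form `start-end:value[/value...]`, comma-separated per range (e.g. `0-1:1/3/5` for the id/ip weights, `0-0.05:1,0.8-1:1` for the VAE skip ranges, `0-1:0.85` for the LoRA scales). How app.py consumes them is not shown in this diff; the sketch below is only a plausible reading of the format, and `parse_schedule` is a hypothetical helper, not the project's actual parser:

def parse_schedule(spec: str):
    # Hypothetical illustration: turn "0-0.05:1,0.8-1:1" or "0-1:1/3/5"
    # into (start, end, [values]) tuples.
    segments = []
    for part in spec.split(","):
        span, _, values = part.partition(":")
        start, _, end = span.partition("-")
        segments.append((float(start), float(end),
                         [float(v) for v in values.split("/")]))
    return segments

# parse_schedule("0-1:1/3/5")        -> [(0.0, 1.0, [1.0, 3.0, 5.0])]
# parse_schedule("0-0.05:1,0.8-1:1") -> [(0.0, 0.05, [1.0]), (0.8, 1.0, [1.0])]
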
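
The overall wiring in this hunk, visible sliders mirrored into hidden, non-interactive textboxes via `.change`, with the textboxes presumably passed on to the generation handler, can be reproduced in isolation. The sketch below uses made-up component names and a stub `generate` function; it only demonstrates the pattern, not app.py's real inputs:

import gradio as gr

def generate(prompt, weight_str):
    # Illustrative backend stub; app.py's real generator takes many more inputs.
    return f"prompt={prompt!r}, weights={weight_str}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    weight = gr.Slider(0.1, 5, step=0.1, value=3, label="weight")
    # Hidden textbox holding the string form the backend consumes.
    weight_str = gr.Textbox(value="0-1:1/3", interactive=False, visible=False)
    # Mirror the slider into the hidden textbox whenever it moves.
    weight.change(lambda w: f"0-1:1/{w}", inputs=weight, outputs=weight_str)
    out = gr.Textbox(label="Output")
    btn = gr.Button("Run")
    btn.click(generate, inputs=[prompt, weight_str], outputs=out)

if __name__ == "__main__":
    demo.launch()
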