Update app.py
app.py (CHANGED)
@@ -209,7 +209,7 @@ def generate_image(
     ip_scale,
     latent_sblora_scale_str, vae_lora_scale,
     indexs, # 新增参数
-
+    *images_captions_faces, # Combine all unpacked arguments into one tuple
 ):
     torch.cuda.empty_cache()
     num_images = 1
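In the new signature every per-image widget value is collected into the single variadic parameter `*images_captions_faces` instead of being listed explicitly (the comment 新增参数 on `indexs` reads "newly added parameter"). The diff does not show how `generate_image` splits that tuple back up, so the following is only a sketch under the assumption that the click handler passes the image paths, the captions, and the "ID or not" flags as three equal-length groups, in that order; the real slicing depends on the `inputs=[...]` list wired to `gen_btn.click`, which lies outside the changed hunks:

    # Hypothetical illustration, not taken from app.py. Assumes the packed tail
    # arguments arrive as: all images, then all captions, then all "ID or not"
    # checkbox values, each group of the same length.
    def split_inputs(images_captions_faces):
        n = len(images_captions_faces) // 3        # number of image slots
        images = images_captions_faces[:n]         # file paths from gr.Image
        captions = images_captions_faces[n:2 * n]  # strings from gr.Textbox
        idips = images_captions_faces[2 * n:]      # booleans from gr.Checkbox
        return images, captions, idips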
@@ -349,6 +349,28 @@ def generate_image(

     return image

+def create_image_input(index, open=True, indexs_state=None):
+    accordion_state = gr.State(open)
+    with gr.Column():
+        with gr.Accordion(f"Input Image {index + 1}", open=accordion_state.value) as accordion:
+            image = gr.Image(type="filepath", label=f"Image {index + 1}")
+            caption = gr.Textbox(label=f"Caption {index + 1}", value="")
+            id_ip_checkbox = gr.Checkbox(value=False, label=f"ID or not {index + 1}", visible=True)
+            with gr.Row():
+                vlm_btn = gr.Button("Auto Caption")
+                det_btn = gr.Button("Det & Seg")
+                face_btn = gr.Button("Crop Face")
+        accordion.expand(
+            inputs=[indexs_state],
+            fn = lambda x: update_inputs(True, index, x),
+            outputs=[indexs_state, accordion_state],
+        )
+        accordion.collapse(
+            inputs=[indexs_state],
+            fn = lambda x: update_inputs(False, index, x),
+            outputs=[indexs_state, accordion_state],
+        )
+    return image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox


 def merge_instances(orig_img, indices, ins_bboxes, ins_images):
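The new `create_image_input` factory builds one collapsible input slot (image, caption, "ID or not" checkbox, and its three buttons) and wires the accordion's `expand`/`collapse` events to an `update_inputs` helper that is defined elsewhere in app.py and is not part of this diff. A minimal sketch of what such a helper could look like, assuming `indexs_state` holds the list of currently open slot indices:

    # Hypothetical sketch, assuming `indexs` is the list of open slot indices
    # stored in the gr.State passed as indexs_state.
    def update_inputs(is_open, index, indexs):
        indexs = list(indexs) if indexs else []
        if is_open and index not in indexs:
            indexs.append(index)    # slot expanded: include it
        elif not is_open and index in indexs:
            indexs.remove(index)    # slot collapsed: drop it
        return indexs, is_open      # matches outputs=[indexs_state, accordion_state]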
@@ -490,31 +512,24 @@ if __name__ == "__main__":
             single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)

             clear_btn = gr.Button("清空输入图像")
-
-
-
-
-
-
-
-
-
-
-
-
-            caption_2 = gr.Textbox(label=f"Caption 2", value="")
-            id_ip_checkbox_2 = gr.Checkbox(value=False, label=f"ID or not 2", visible=True)
-            with gr.Row():
-                vlm_btn_2 = gr.Button("Auto Caption")
-                det_btn_2 = gr.Button("Det & Seg")
-                face_btn_2 = gr.Button("Crop Face")
+            with gr.Row():
+                for i in range(num_inputs):
+                    image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox = create_image_input(i, open=i<2, indexs_state=indexs_state)
+                    images.append(image)
+                    idip_checkboxes.append(id_ip_checkbox)
+                    captions.append(caption)
+                    face_btns.append(face_btn)
+                    det_btns.append(det_btn)
+                    vlm_btns.append(vlm_btn)
+                    accordion_states.append(accordion_state)
+
+                    accordions.append(accordion)

         with gr.Column():
             output = gr.Image(label="生成的图像")
             seed = gr.Number(value=42, label="Seed", info="")
             gen_btn = gr.Button("生成图像")
-
-            gr.Markdown("### Examples")
+
         gen_btn.click(
             generate_image,
             inputs=[
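The loop relies on `num_inputs`, `indexs_state`, and the collector lists (`images`, `captions`, `idip_checkboxes`, `face_btns`, `det_btns`, `vlm_btns`, `accordion_states`, `accordions`) being created earlier inside the `gr.Blocks` context; that setup sits outside the changed hunks. A plausible sketch, with the slot count and the initially open slots as assumptions (`open=i<2` suggests the first two accordions start expanded); for reference, the Chinese labels are 清空输入图像 "clear input images", 生成的图像 "generated image", and 生成图像 "generate image":

    import gradio as gr  # app.py already imports gradio; repeated here for completeness

    # Hypothetical setup, not shown in the diff.
    num_inputs = 4                    # total input slots rendered by the loop (assumed value)
    indexs_state = gr.State([0, 1])   # indices of slots whose accordions start open
    images, captions, idip_checkboxes = [], [], []
    face_btns, det_btns, vlm_btns = [], [], []
    accordion_states, accordions = [], []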
@@ -530,17 +545,17 @@ if __name__ == "__main__":
             outputs=output
         )

-
-        #
-
-        face_btn_1.click(crop_face_img, inputs=[image_1], outputs=[image_1])
-        det_btn_1.click(det_seg_img, inputs=[image_1, caption_1], outputs=[image_1])
-        vlm_btn_1.click(vlm_img_caption, inputs=[image_1], outputs=[caption_1])
-
-        face_btn_2.click(crop_face_img, inputs=[image_2], outputs=[image_2])
-        det_btn_2.click(det_seg_img, inputs=[image_2, caption_2], outputs=[image_2])
-        vlm_btn_2.click(vlm_img_caption, inputs=[image_2], outputs=[caption_2])
+
+        # 修改清空函数的输出参数
+        clear_btn.click(clear_images, outputs=images)

+        # 循环绑定 Det & Seg 和 Auto Caption 按钮的点击事件
+        for i in range(num_inputs):
+            face_btns[i].click(crop_face_img, inputs=[images[i]], outputs=[images[i]])
+            det_btns[i].click(det_seg_img, inputs=[images[i], captions[i]], outputs=[images[i]])
+            vlm_btns[i].click(vlm_img_caption, inputs=[images[i]], outputs=[captions[i]])
+            accordion_states[i].change(fn=lambda x, state, index=i: change_accordion(x, index, state), inputs=[accordion_states[i], indexs_state], outputs=[accordions[i], indexs_state])
+

         demo.queue()
         demo.launch()
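The two comments added in this hunk gloss as "adjust the clear function's output parameters" and "bind the Det & Seg and Auto Caption button click events in a loop". `clear_images` and `change_accordion` are referenced here but defined elsewhere in app.py; the sketches below are minimal guesses under the semantics implied by the wiring above (one reset value per image slot, and an accordion kept in sync with the open-slot list):

    import gradio as gr  # app.py already imports gradio; repeated here for completeness

    # Hypothetical sketches, not taken from app.py.
    def clear_images():
        # One None per slot resets every gr.Image bound in outputs=images.
        return [None] * num_inputs

    def change_accordion(is_open, index, indexs):
        # Keep the accordion widget and the list of active slots consistent.
        indexs = list(indexs) if indexs else []
        if is_open and index not in indexs:
            indexs.append(index)
        elif not is_open and index in indexs:
            indexs.remove(index)
        return gr.update(open=is_open), indexs  # matches outputs=[accordions[i], indexs_state]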
|