helloworld-S committed on
Commit
3223042
·
verified ·
1 Parent(s): d4dee83

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -82
app.py CHANGED
@@ -131,24 +131,12 @@ captions = []
131
  face_btns = []
132
  det_btns = []
133
  vlm_btns = []
134
- accordions = []
135
  idip_checkboxes = []
136
- accordion_states = []
137
 
138
  def open_accordion_on_example_selection(*args):
139
- print("enter open_accordion_on_example_selection")
140
  len_images = (len(args)-11)//3
141
- images = list(args[:len_images])+[None,]*(num_inputs-len_images)
142
- outputs = []
143
- for i, img in enumerate(images):
144
- if img is not None:
145
- print(f"open accordions {i}")
146
- outputs.append(True)
147
- else:
148
- print(f"close accordions {i}")
149
- outputs.append(False)
150
- print("Setup all accordions", outputs)
151
- return outputs
152
 
153
  @spaces.GPU
154
  def generate_image(
@@ -160,7 +148,6 @@ def generate_image(
160
  single_attention, # 新增参数
161
  latent_dblora_scale_str,
162
  latent_sblora_scale_str, vae_lora_scale,
163
- indexs, # 新增参数
164
  *images_captions_faces, # Combine all unpacked arguments into one tuple
165
  ):
166
  torch.cuda.empty_cache()
@@ -170,13 +157,9 @@ def generate_image(
170
  images = list(images_captions_faces[:num_inputs])
171
  captions = list(images_captions_faces[num_inputs:2 * num_inputs])
172
  idips_checkboxes = list(images_captions_faces[2 * num_inputs:3 * num_inputs])
173
- images = [images[i] for i in indexs]
174
- captions = [captions[i] for i in indexs]
175
- idips_checkboxes = [idips_checkboxes[i] for i in indexs]
176
 
177
  print(f"Length of images: {len(images)}")
178
  print(f"Length of captions: {len(captions)}")
179
- print(f"Indexs: {indexs}")
180
 
181
  print(f"Control weight lambda: {control_weight_lambda}")
182
  if control_weight_lambda != "no":
@@ -301,28 +284,16 @@ def generate_image(
301
 
302
  return image
303
 
304
- def create_image_input(index, open=True, indexs_state=None):
305
- accordion_state = gr.State(open)
306
  with gr.Column():
307
- with gr.Accordion(f"Input Image {index + 1}", open=accordion_state.value) as accordion:
308
- image = gr.Image(type="filepath", label=f"Image {index + 1}")
309
- caption = gr.Textbox(label=f"Caption {index + 1}", value="")
310
- id_ip_checkbox = gr.Checkbox(value=False, label=f"ID or not {index + 1}", visible=True)
311
- with gr.Row():
312
- vlm_btn = gr.Button("Auto Caption")
313
- det_btn = gr.Button("Det & Seg")
314
- face_btn = gr.Button("Crop Face")
315
- accordion.expand(
316
- inputs=[indexs_state],
317
- fn = lambda x: update_inputs(True, index, x),
318
- outputs=[indexs_state, accordion_state],
319
- )
320
- accordion.collapse(
321
- inputs=[indexs_state],
322
- fn = lambda x: update_inputs(False, index, x),
323
- outputs=[indexs_state, accordion_state],
324
- )
325
- return image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox
326
 
327
 
328
  def merge_instances(orig_img, indices, ins_bboxes, ins_images):
@@ -342,42 +313,10 @@ def merge_instances(orig_img, indices, ins_bboxes, ins_images):
342
  img = final_img.crop(bbox)
343
  return img, bbox
344
 
345
- def change_accordion(at: bool, index: int, state: list):
346
- print("change_accordion", index, state)
347
- print("at:", at)
348
- # indexs = state
349
- # if at:
350
- # if index not in indexs:
351
- # indexs.append(index)
352
- # else:
353
- # if index in indexs:
354
- # indexs.remove(index)
355
-
356
- # # 确保 indexs 是有序的
357
- # indexs.sort()
358
- # print("After changed", indexs)
359
- # return gr.Accordion(open=at), indexs
360
-
361
- def update_inputs(is_open, index, state: list):
362
- indexs = state
363
- if is_open:
364
- if index not in indexs:
365
- indexs.append(index)
366
- else:
367
- if index in indexs:
368
- indexs.remove(index)
369
-
370
- # 确保 indexs 是有序的
371
- indexs.sort()
372
- print("After update input", indexs)
373
- return indexs, is_open
374
-
375
  if __name__ == "__main__":
376
 
377
  with gr.Blocks() as demo:
378
 
379
- indexs_state = gr.State([0, 1]) # 添加状态来存储 indexs
380
-
381
  gr.Markdown("### XVerse Demo")
382
  with gr.Row():
383
  with gr.Column():
@@ -386,16 +325,13 @@ if __name__ == "__main__":
386
  clear_btn = gr.Button("清空输入图像")
387
  with gr.Row():
388
  for i in range(num_inputs):
389
- image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox = create_image_input(i, open=i<2, indexs_state=indexs_state)
390
  images.append(image)
391
  idip_checkboxes.append(id_ip_checkbox)
392
  captions.append(caption)
393
  face_btns.append(face_btn)
394
  det_btns.append(det_btn)
395
  vlm_btns.append(vlm_btn)
396
- accordion_states.append(accordion_state)
397
-
398
- accordions.append(accordion)
399
 
400
  # 将其他设置参数压缩到 Advanced Accordion 内
401
  with gr.Accordion("Advanced", open=False):
@@ -481,9 +417,9 @@ if __name__ == "__main__":
481
  single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)
482
 
483
  with gr.Column():
484
- output = gr.Image(label="生成的图像")
485
  seed = gr.Number(value=42, label="Seed", info="")
486
- gen_btn = gr.Button("生成图像")
487
 
488
  gr.Markdown("### Examples")
489
  gen_btn.click(
@@ -493,7 +429,6 @@ if __name__ == "__main__":
493
  vae_skip_iter, weight_id_ip_str,
494
  double_attention, single_attention,
495
  db_latent_lora_scale_str, sb_latent_lora_scale_str, vae_lora_scale_str,
496
- indexs_state, # 传递 indexs 状态
497
  *images,
498
  *captions,
499
  *idip_checkboxes,
@@ -509,8 +444,6 @@ if __name__ == "__main__":
509
  face_btns[i].click(crop_face_img, inputs=[images[i]], outputs=[images[i]])
510
  det_btns[i].click(det_seg_img, inputs=[images[i], captions[i]], outputs=[images[i]])
511
  vlm_btns[i].click(vlm_img_caption, inputs=[images[i]], outputs=[captions[i]])
512
- accordion_states[i].change(fn=lambda x, state, index=i: change_accordion(x, index, state), inputs=[accordion_states[i], indexs_state])
513
-
514
 
515
  examples = gr.Examples(
516
  examples=[
@@ -570,7 +503,7 @@ if __name__ == "__main__":
570
  vae_skip_iter_s1,
571
  vae_skip_iter_s2,
572
  ],
573
- outputs=accordion_states,
574
  fn=open_accordion_on_example_selection,
575
  run_on_click=True,
576
  label="Examples"
 
131
  face_btns = []
132
  det_btns = []
133
  vlm_btns = []
 
134
  idip_checkboxes = []
 
135
 
136
  def open_accordion_on_example_selection(*args):
137
+ # print("enter open_accordion_on_example_selection")
138
  len_images = (len(args)-11)//3
139
+ return [None,]*(num_inputs-len_images)*3
 
 
 
 
 
 
 
 
 
 
140
 
141
  @spaces.GPU
142
  def generate_image(
 
148
  single_attention, # 新增参数
149
  latent_dblora_scale_str,
150
  latent_sblora_scale_str, vae_lora_scale,
 
151
  *images_captions_faces, # Combine all unpacked arguments into one tuple
152
  ):
153
  torch.cuda.empty_cache()
 
157
  images = list(images_captions_faces[:num_inputs])
158
  captions = list(images_captions_faces[num_inputs:2 * num_inputs])
159
  idips_checkboxes = list(images_captions_faces[2 * num_inputs:3 * num_inputs])
 
 
 
160
 
161
  print(f"Length of images: {len(images)}")
162
  print(f"Length of captions: {len(captions)}")
 
163
 
164
  print(f"Control weight lambda: {control_weight_lambda}")
165
  if control_weight_lambda != "no":
 
284
 
285
  return image
286
 
287
+ def create_image_input(index):
 
288
  with gr.Column():
289
+ image = gr.Image(type="filepath", label=f"Image {index + 1}")
290
+ caption = gr.Textbox(label=f"Caption {index + 1}", value="")
291
+ id_ip_checkbox = gr.Checkbox(value=False, label=f"ID or not {index + 1}", visible=True)
292
+ with gr.Row():
293
+ vlm_btn = gr.Button("Auto Caption")
294
+ det_btn = gr.Button("Det & Seg")
295
+ face_btn = gr.Button("Crop Face")
296
+ return image, caption, face_btn, det_btn, vlm_btn, id_ip_checkbox
 
 
 
 
 
 
 
 
 
 
 
297
 
298
 
299
  def merge_instances(orig_img, indices, ins_bboxes, ins_images):
 
313
  img = final_img.crop(bbox)
314
  return img, bbox
315
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
  if __name__ == "__main__":
317
 
318
  with gr.Blocks() as demo:
319
 
 
 
320
  gr.Markdown("### XVerse Demo")
321
  with gr.Row():
322
  with gr.Column():
 
325
  clear_btn = gr.Button("清空输入图像")
326
  with gr.Row():
327
  for i in range(num_inputs):
328
+ image, caption, face_btn, det_btn, vlm_btn, id_ip_checkbox = create_image_input(i)
329
  images.append(image)
330
  idip_checkboxes.append(id_ip_checkbox)
331
  captions.append(caption)
332
  face_btns.append(face_btn)
333
  det_btns.append(det_btn)
334
  vlm_btns.append(vlm_btn)
 
 
 
335
 
336
  # 将其他设置参数压缩到 Advanced Accordion 内
337
  with gr.Accordion("Advanced", open=False):
 
417
  single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)
418
 
419
  with gr.Column():
420
+ output = gr.Image(label="Generated Image")
421
  seed = gr.Number(value=42, label="Seed", info="")
422
+ gen_btn = gr.Button("Generate Image")
423
 
424
  gr.Markdown("### Examples")
425
  gen_btn.click(
 
429
  vae_skip_iter, weight_id_ip_str,
430
  double_attention, single_attention,
431
  db_latent_lora_scale_str, sb_latent_lora_scale_str, vae_lora_scale_str,
 
432
  *images,
433
  *captions,
434
  *idip_checkboxes,
 
444
  face_btns[i].click(crop_face_img, inputs=[images[i]], outputs=[images[i]])
445
  det_btns[i].click(det_seg_img, inputs=[images[i], captions[i]], outputs=[images[i]])
446
  vlm_btns[i].click(vlm_img_caption, inputs=[images[i]], outputs=[captions[i]])
 
 
447
 
448
  examples = gr.Examples(
449
  examples=[
 
503
  vae_skip_iter_s1,
504
  vae_skip_iter_s2,
505
  ],
506
+ outputs=images[3:]+captions[3:]+idip_checkboxes[3:],
507
  fn=open_accordion_on_example_selection,
508
  run_on_click=True,
509
  label="Examples"