WYBar committed
Commit a162d7b · 1 parent: e8292cf
Files changed (1):
  1. app.py +60 -28
app.py CHANGED
@@ -345,7 +345,7 @@ def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_s
     inputs = tokenizer(
         input_intension, return_tensors="pt"
     ).to(model.lm.device)
-    print(inputs.device)
+    # print(inputs.device)
     print("tokenizer2")
 
     stopping_criteria = StoppingCriteriaList()
@@ -401,6 +401,8 @@ def inference(generate_method, intention, model, quantizer, tokenizer, width, he
 
 # @spaces.GPU(enable_queue=True, duration=60)
 def construction():
+    global pipeline
+    global transp_vae
     from custom_model_mmdit import CustomFluxTransformer2DModel
     from custom_model_transp_vae import AutoencoderKLTransformerTraining as CustomVAE
     from custom_pipeline import CustomFluxPipelineCfg
@@ -429,7 +431,7 @@ def construction():
     ).to("cuda")
     pipeline.enable_model_cpu_offload(gpu_id=0) # Save GPU memory
 
-    return pipeline, transp_vae
+    # return pipeline, transp_vae
 
 @spaces.GPU(duration=60)
 def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae):
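Taken together, the two construction() hunks above stop returning (pipeline, transp_vae) and instead populate module-level globals that later callbacks read. Below is a minimal, self-contained sketch of that pattern; the stand-in objects replace the real Flux pipeline and transparency VAE, which the actual construction() builds from CustomFluxTransformer2DModel, the custom VAE, and CustomFluxPipelineCfg.

# Sketch of the global-initialization pattern adopted in this commit.
pipeline = None
transp_vae = None

def construction():
    global pipeline, transp_vae
    pipeline = object()    # stand-in for the CustomFluxPipelineCfg pipeline
    transp_vae = object()  # stand-in for the transparency VAE
    # No return value: callers rely on the globals having been populated.

def process_svg(text_input):
    # Simplified consumer: reads the globals instead of receiving the objects
    # via functools.partial, mirroring the new module-level process_svg below.
    assert pipeline is not None and transp_vae is not None, "call construction() first"
    return f"would render {text_input!r} with the loaded pipeline"

construction()
print(process_svg("a layered poster"))

The commit itself does not state the motivation; a plausible reading is that a module-level process_svg (rather than a closure built inside main() around functools.partial) is simpler to register as a Gradio callback, with the heavy objects shared through the globals.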
@@ -461,6 +463,7 @@ def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps,
     return output_gradio
 
 def svg_test_one_sample(validation_prompt, validation_box_str, seed, true_gs, inference_steps, pipeline, transp_vae):
+    print("svg_test_one_sample")
     generator = torch.Generator().manual_seed(seed)
     try:
         validation_box = ast.literal_eval(validation_box_str)
@@ -471,8 +474,9 @@ def svg_test_one_sample(validation_prompt, validation_box_str, seed, true_gs, in
 
     validation_box = adjust_validation_box(validation_box)
 
+    print("result_images = test_one_sample")
     result_images = test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae)
-
+    print("after result_images = test_one_sample")
     svg_img = pngs_to_svg(result_images[1:])
 
     svg_file_path = './image.svg'
@@ -491,6 +495,31 @@ def svg_test_one_sample(validation_prompt, validation_box_str, seed, true_gs, in
         raise ValueError(f"文件 {svg_file_path} 内容为空")
 
     return result_images, svg_file_path
+
+def process_svg(text_input, tuple_input, seed, true_gs, inference_steps):
+    print("precess_svg")
+    result_images = []
+    result_images, svg_file_path = svg_test_one_sample(text_input, tuple_input, seed, true_gs, inference_steps, pipeline=pipeline, transp_vae=transp_vae)
+    # result_images, svg_file_path = gradio_test_one_sample_partial(text_input, tuple_input, seed, true_gs, inference_steps)
+
+    url, unique_filename = upload_to_github(file_path=svg_file_path)
+    unique_filename = f'{unique_filename}'
+
+    if url != None:
+        print(f"File uploaded to: {url}")
+        svg_editor = f"""
+        <iframe src="https://svgedit.netlify.app/editor/index.html?\
+        storagePrompt=false&url={url}" \
+        width="100%", height="800px"></iframe>
+        """
+    else:
+        print('upload_to_github FAILED!')
+        svg_editor = f"""
+        <iframe src="https://svgedit.netlify.app/editor/index.html" \
+        width="100%", height="800px"></iframe>
+        """
+
+    return result_images, svg_file_path, svg_editor
 
 def main():
     model, quantizer, tokenizer, width, height, device = construction_layout()
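The new module-level process_svg returns three values: the per-layer images, the path of the written SVG, and an HTML snippet embedding the file in the svgedit.netlify.app editor. The commit does not show how it is wired into the Gradio UI; a hypothetical hookup is sketched below, where every component name and default value is illustrative rather than taken from app.py, and construction() is assumed to have run so the globals are populated.

import gradio as gr

# Hypothetical wiring; assumes process_svg (and construction()) from app.py are in scope.
with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Intention / caption")
    tuple_input = gr.Textbox(label="Validation boxes (Python list literal)")
    seed = gr.Number(value=0, precision=0, label="Seed")
    true_gs = gr.Slider(1.0, 10.0, value=3.0, label="true_gs")
    inference_steps = gr.Slider(1, 50, value=28, step=1, label="Inference steps")
    gallery = gr.Gallery(label="Generated layers")
    svg_file = gr.File(label="image.svg")
    svg_editor = gr.HTML()
    gr.Button("Generate SVG").click(
        process_svg,
        inputs=[text_input, tuple_input, seed, true_gs, inference_steps],
        outputs=[gallery, svg_file, svg_editor],
    )

Whatever the real wiring is, process_svg now only needs the five UI inputs, because pipeline and transp_vae come from the globals set by construction().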
@@ -550,36 +579,39 @@ def main():
     # json_file = "/home/wyb/openseg_blob/v-yanbin/GradioDemo/LLM-For-Layout-Planning/inference_test.json"
     # return wholecaption, str(list_box), json_file
 
-    pipeline, transp_vae = construction()
+    # pipeline, transp_vae = construction()
+    construction()
 
-    gradio_test_one_sample_partial = partial(
-        svg_test_one_sample,
-        pipeline=pipeline,
-        transp_vae=transp_vae,
-    )
+    # gradio_test_one_sample_partial = partial(
+    #     svg_test_one_sample,
+    #     pipeline=pipeline,
+    #     transp_vae=transp_vae,
+    # )
 
-    def process_svg(text_input, tuple_input, seed, true_gs, inference_steps):
-        result_images = []
-        result_images, svg_file_path = gradio_test_one_sample_partial(text_input, tuple_input, seed, true_gs, inference_steps)
+    # def process_svg(text_input, tuple_input, seed, true_gs, inference_steps):
+    #     print("precess_svg")
+    #     result_images = []
+    #     result_images, svg_file_path = svg_test_one_sample(text_input, tuple_input, seed, true_gs, inference_steps, pipeline=pipeline, transp_vae=transp_vae)
+    #     # result_images, svg_file_path = gradio_test_one_sample_partial(text_input, tuple_input, seed, true_gs, inference_steps)
 
-        url, unique_filename = upload_to_github(file_path=svg_file_path)
-        unique_filename = f'{unique_filename}'
+    #     url, unique_filename = upload_to_github(file_path=svg_file_path)
+    #     unique_filename = f'{unique_filename}'
 
-        if url != None:
-            print(f"File uploaded to: {url}")
-            svg_editor = f"""
-            <iframe src="https://svgedit.netlify.app/editor/index.html?\
-            storagePrompt=false&url={url}" \
-            width="100%", height="800px"></iframe>
-            """
-        else:
-            print('upload_to_github FAILED!')
-            svg_editor = f"""
-            <iframe src="https://svgedit.netlify.app/editor/index.html" \
-            width="100%", height="800px"></iframe>
-            """
+    #     if url != None:
+    #         print(f"File uploaded to: {url}")
+    #         svg_editor = f"""
+    #         <iframe src="https://svgedit.netlify.app/editor/index.html?\
+    #         storagePrompt=false&url={url}" \
+    #         width="100%", height="800px"></iframe>
+    #         """
+    #     else:
+    #         print('upload_to_github FAILED!')
+    #         svg_editor = f"""
+    #         <iframe src="https://svgedit.netlify.app/editor/index.html" \
+    #         width="100%", height="800px"></iframe>
+    #         """
 
-        return result_images, svg_file_path, svg_editor
+    #     return result_images, svg_file_path, svg_editor
 
     def one_click_generate(intention_input, temperature, top_p, seed, true_gs, inference_steps):
         # 首先调用process_preddate
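For reference, the binding that the commented-out block in main() preserves relied on functools.partial to pre-fill the heavy objects before handing the callback to Gradio. A standalone illustration of that retired pattern follows; the function body and bound values are stand-ins, not the real app.py code.

from functools import partial

# Stand-in with the same parameter layout as svg_test_one_sample in the diff.
def svg_test_one_sample(validation_prompt, validation_box_str, seed,
                        true_gs, inference_steps, pipeline, transp_vae):
    return f"{validation_prompt} rendered with {pipeline} / {transp_vae}"

# Pre-bind the pipeline and VAE so the callback only receives the UI inputs,
# which is what gradio_test_one_sample_partial did before this commit.
gradio_test_one_sample_partial = partial(
    svg_test_one_sample,
    pipeline="flux-pipeline",      # stand-in for the loaded CustomFluxPipelineCfg
    transp_vae="transparency-vae"  # stand-in for the transparency VAE
)

print(gradio_test_one_sample_partial("a poster", "[(0, 0, 512, 512)]", 0, 3.0, 20))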
 