WYBar committed on
Commit
3a4bde2
·
1 Parent(s): d3c9995
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -365,6 +365,7 @@ def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, do_sample=Fa
365
  print(f"evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
366
  model = model.to("cuda")
367
  print(f"after evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
 
368
  json_example = inputs
369
  input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
370
 
@@ -471,7 +472,7 @@ def process_preddate(intention, temperature, top_p, generate_method='v1'):
471
 
472
  @spaces.GPU(duration=120)
473
  def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae):
474
- print(validation_box)
475
  output, rgba_output, _, _ = pipeline(
476
  prompt=validation_prompt,
477
  validation_box=validation_box,
@@ -500,7 +501,8 @@ def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps,
500
  return output_gradio
501
 
502
  def gradio_test_one_sample(validation_prompt, validation_box_str, seed, true_gs, inference_steps, pipeline, transp_vae):
503
- print(f"svg_test_one_sample {model.device} {model.lm.device} {pipeline.device}")
 
504
  # generator = torch.Generator().manual_seed(seed)
505
  generator = torch.Generator(device=torch.device("cuda", index=0)).manual_seed(seed)
506
  try:
@@ -538,7 +540,8 @@ def gradio_test_one_sample(validation_prompt, validation_box_str, seed, true_gs,
538
  return result_images, svg_file_path
539
 
540
  def process_svg(text_input, tuple_input, seed, true_gs, inference_steps):
541
- print(f"precess_svg {model.device} {model.lm.device} {pipeline.device}")
 
542
  result_images = []
543
  result_images, svg_file_path = gradio_test_one_sample(text_input, tuple_input, seed, true_gs, inference_steps, pipeline=pipeline, transp_vae=transp_vae)
544
  # result_images, svg_file_path = gradio_test_one_sample_partial(text_input, tuple_input, seed, true_gs, inference_steps)
 
365
  print(f"evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
366
  model = model.to("cuda")
367
  print(f"after evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
368
+
369
  json_example = inputs
370
  input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
371
 
 
472
 
473
  @spaces.GPU(duration=120)
474
  def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae):
475
+ print(f"test_one_sample: {validation_box}")
476
  output, rgba_output, _, _ = pipeline(
477
  prompt=validation_prompt,
478
  validation_box=validation_box,
 
501
  return output_gradio
502
 
503
  def gradio_test_one_sample(validation_prompt, validation_box_str, seed, true_gs, inference_steps, pipeline, transp_vae):
504
+ print(f"svg_test_one_sample")
505
+ # print(f"svg_test_one_sample {model.device} {model.lm.device} {pipeline.device}")
506
  # generator = torch.Generator().manual_seed(seed)
507
  generator = torch.Generator(device=torch.device("cuda", index=0)).manual_seed(seed)
508
  try:
 
540
  return result_images, svg_file_path
541
 
542
  def process_svg(text_input, tuple_input, seed, true_gs, inference_steps):
543
+ print(f"precess_svg")
544
+ # print(f"precess_svg {model.device} {model.lm.device} {pipeline.device}")
545
  result_images = []
546
  result_images, svg_file_path = gradio_test_one_sample(text_input, tuple_input, seed, true_gs, inference_steps, pipeline=pipeline, transp_vae=transp_vae)
547
  # result_images, svg_file_path = gradio_test_one_sample_partial(text_input, tuple_input, seed, true_gs, inference_steps)