hujiecpp committed
Commit 33c3163 · 1 Parent(s): a443523

init project

Files changed (1): app.py (+23 -4)
app.py CHANGED
@@ -496,6 +496,12 @@ def get_reconstructed_scene(outdir, filelist, schedule='linear', niter=300, min_
     then run get_3D_model_from_scene
     """
 
+    if len(filelist) < 2:
+        raise gradio.Error("Please input at least 2 images.")
+    if len(filelist) > 8:
+        raise gradio.Error("Please input less than 8 images.")
+
+
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
     MAST3R_CKP = 'naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric'
@@ -521,8 +527,7 @@ def get_reconstructed_scene(outdir, filelist, schedule='linear', niter=300, min_
     YOLO8_CKP='./checkpoints/ObjectAwareModel.pt'
     yolov8 = ObjectAwareModel(YOLO8_CKP)
 
-    if len(filelist) < 2:
-        raise gradio.Error("Please input at least 2 images.")
+
 
     images = Images(filelist=filelist, device=device)
 
@@ -635,9 +640,13 @@ with gradio.Blocks(css=""".gradio-container {margin: 0 !important; min-width: 10
     # scene state is save so that you can change conf_thr, cam_size... without rerunning the inference
     scene = gradio.State(None)
 
-    gradio.HTML('<h2 style="text-align: center;">PE3R Demo</h2>')
+    gradio.HTML('<h2 style="text-align: center;">PE3R: Perception-Efficient 3D Reconstruction</h2>')
+    gradio.HTML('<p style="text-align: center; font-size: 16px;">🪄 Take 2~3 photos with your phone, upload them, wait a few (3~5) minutes, and then start exploring your 3D world via text!<br>'
+                '✨ If you like this project, please consider giving us an encouraging star <a href="https://github.com/hujiecpp/PE3R" target="_blank">[github]</a>.</p>')
     with gradio.Column():
-        inputfiles = gradio.File(file_count="multiple")
+        snapshot = gradio.Image(None, visible=False)
+
+        inputfiles = gradio.File(file_count="multiple", label="Input Images")
 
         run_btn = gradio.Button("Reconstruct")
 
@@ -648,6 +657,16 @@ with gradio.Blocks(css=""".gradio-container {margin: 0 !important; min-width: 10
         find_btn = gradio.Button("Find")
 
         outmodel = gradio.Model3D()
+
+        examples = gradio.Examples(
+            examples=[
+                ["./examples/1.png", ["./examples/1.png", "./examples/2.png", "./examples/3.png", "./examples/4.png"], "Table", 0.85],
+                ["./examples/5.png", ["./examples/5.png", "./examples/6.png", "./examples/7.png", "./examples/8.png"], "Christmas Tree", 0.96],
+            ],
+            inputs=[snapshot, inputfiles, text_input, threshold],
+            label="Example Inputs"
+        )
+
         # events
 
         run_btn.click(fn=recon_fun,
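
For readers skimming the diff, the two changes fit together roughly as follows: the image-count checks now run at the top of get_reconstructed_scene, before any checkpoint is loaded, and the new gradio.Examples block feeds the four components [snapshot, inputfiles, text_input, threshold]. Below is a minimal standalone sketch of that wiring, not the actual app.py: the stub function, text_input, threshold, and the demo scaffolding are assumptions for illustration, while the error messages and ./examples/*.png paths come from the diff (the example paths are expected to exist in the Space).

import gradio as gr

def get_reconstructed_scene_stub(filelist):
    # Same early validation the commit moves to the top of get_reconstructed_scene:
    # reject bad input counts before any heavy model loading or inference.
    if filelist is None or len(filelist) < 2:
        raise gr.Error("Please input at least 2 images.")
    if len(filelist) > 8:
        raise gr.Error("Please input less than 8 images.")
    return f"{len(filelist)} images accepted"  # placeholder for the real reconstruction

with gr.Blocks() as demo:
    snapshot = gr.Image(None, visible=False)  # hidden thumbnail shown in the Examples table
    inputfiles = gr.File(file_count="multiple", label="Input Images")
    text_input = gr.Textbox(label="Query Text")                     # assumed component
    threshold = gr.Slider(0.0, 1.0, value=0.85, label="Threshold")  # assumed component
    run_btn = gr.Button("Reconstruct")
    status = gr.Textbox(label="Status")

    # Each example row fills the four inputs in order, mirroring the diff.
    gr.Examples(
        examples=[
            ["./examples/1.png",
             ["./examples/1.png", "./examples/2.png", "./examples/3.png", "./examples/4.png"],
             "Table", 0.85],
        ],
        inputs=[snapshot, inputfiles, text_input, threshold],
        label="Example Inputs",
    )

    run_btn.click(fn=get_reconstructed_scene_stub, inputs=[inputfiles], outputs=[status])

if __name__ == "__main__":
    demo.launch()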