hujiecpp committed
Commit 7398aa3 · 1 Parent(s): b65ac4b

init project

Files changed (3):
  1. app.py +2 -5
  2. modules/pe3r/demo.py +4 -2
  3. modules/pe3r/models.py +1 -5
app.py CHANGED
@@ -9,8 +9,7 @@ sys.path.append(os.path.abspath('./modules'))
 import argparse
 
 from modules.pe3r.demo import main_demo
-from modules.pe3r.models import Models
-import torch
+# import torch
 
 # def set_print_with_timestamp(time_format="%Y-%m-%d %H:%M:%S"):
 #     builtin_print = builtins.print
@@ -51,9 +50,7 @@ if __name__ == '__main__':
     else:
         server_name = '0.0.0.0' if args.local_network else '127.0.0.1'
 
-    pe3r = Models()
-
     with tempfile.TemporaryDirectory(suffix='pe3r_gradio_demo') as tmpdirname:
         if not args.silent:
             print('Outputing stuff in', tmpdirname)
-        main_demo(tmpdirname, pe3r, server_name, args.server_port, silent=args.silent)
+        main_demo(tmpdirname, server_name, args.server_port, silent=args.silent)
modules/pe3r/demo.py CHANGED
@@ -32,7 +32,7 @@ from typing import Any, Dict, Generator,List
 import matplotlib.pyplot as pl
 
 from modules.mobilesamv2.utils.transforms import ResizeLongestSide
-
+from modules.pe3r.models import Models
 
 def _convert_scene_output_to_glb(outdir, imgs, pts3d, mask, focals, cams2world, cam_size=0.05,
                                  cam_color=None, as_pointcloud=False,
@@ -550,8 +550,10 @@ def set_scenegraph_options(inputfiles, winsize, refid, scenegraph_type):
 
 
 @spaces.GPU(duration=180)
-def main_demo(tmpdirname, pe3r, server_name, server_port, silent=False):
+def main_demo(tmpdirname, server_name, server_port, silent=False):
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+    pe3r = Models(device)
     # scene, outfile, imgs = get_reconstructed_scene(
     #     outdir=tmpdirname, pe3r=pe3r, device=device, silent=silent,
     #     filelist=['/home/hujie/pe3r/datasets/mipnerf360_ov/bonsai/black_chair/images/DSCF5590.png',
modules/pe3r/models.py CHANGED
@@ -10,13 +10,9 @@ from modules.mobilesamv2.promt_mobilesamv2 import ObjectAwareModel
 from modules.mobilesamv2 import sam_model_registry
 
 from sam2.sam2_video_predictor import SAM2VideoPredictor
-import spaces
-import torch
 
 class Models:
-    @spaces.GPU(duration=180)
-    def __init__(self, device='cpu'):
-        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    def __init__(self, device):
         # -- mast3r --
         # MAST3R_CKP = './checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth'
         MAST3R_CKP = 'naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric'
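
Taken together, the three diffs defer model construction: app.py no longer builds Models at startup, models.py no longer touches CUDA (or the spaces decorator) in __init__, and demo.py instantiates Models(device) inside the @spaces.GPU-decorated main_demo, so checkpoints are loaded only while a GPU is actually attached to the Space. A minimal sketch of that pattern follows; it uses stand-in bodies and is not the project's full code, only the spaces.GPU decorator and the signatures shown in the diffs are taken from the commit.

# Minimal sketch of the deferred-initialization pattern, assuming the real
# Models class loads its checkpoints onto `device` in __init__ (as models.py does).
import spaces   # Hugging Face ZeroGPU decorator, same one used in demo.py
import torch

class Models:
    # Stand-in for modules.pe3r.models.Models
    def __init__(self, device):
        self.device = device
        # ... load MASt3R / MobileSAMv2 / SAM2 weights onto `device` here ...

@spaces.GPU(duration=180)            # a GPU is attached only for the duration of this call
def main_demo(tmpdirname, server_name, server_port, silent=False):
    # CUDA is first touched here, inside the GPU-allocated context
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    pe3r = Models(device)            # heavy model loading happens lazily
    # ... build the Gradio demo and launch it on (server_name, server_port) ...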