Jie Hu committed · Commit d17054d · Parent(s): 61e5f12
init project
app.py
CHANGED
@@ -37,10 +37,14 @@ from modules.mobilesamv2.utils.transforms import ResizeLongestSide
 from modules.pe3r.models import Models
 import torchvision.transforms as tvf
 
+from modules.mast3r.model import AsymmetricMASt3R
 
 silent = False
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
-pe3r = Models(device)
+# pe3r = Models(device)
+MAST3R_CKP = 'naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric'
+mast3r = AsymmetricMASt3R.from_pretrained(MAST3R_CKP).to(device)
+
 
 
 def _convert_scene_output_to_glb(outdir, imgs, pts3d, mask, focals, cams2world, cam_size=0.05,
@@ -469,7 +473,7 @@ def get_reconstructed_scene(outdir, filelist, schedule, niter, min_conf_thr,
 scenegraph_type = scenegraph_type + "-" + str(refid)
 
 pairs = make_pairs(imgs, scene_graph=scenegraph_type, prefilter=None, symmetrize=True)
-output = inference(pairs,
+output = inference(pairs, mast3r, device, batch_size=1, verbose=not silent)
 mode = GlobalAlignerMode.PointCloudOptimizer if len(imgs) > 2 else GlobalAlignerMode.PairViewer
 scene_1 = global_aligner(output, cog_seg_maps, rev_cog_seg_maps, cog_feats, device=device, mode=mode, verbose=not silent)
 lr = 0.01
@@ -482,7 +486,7 @@ def get_reconstructed_scene(outdir, filelist, schedule, niter, min_conf_thr,
 # print(imgs[i]['img'].shape, scene.imgs[i].shape, ImgNorm(scene.imgs[i])[None])
 imgs[i]['img'] = ImgNorm(scene_1.imgs[i])[None]
 pairs = make_pairs(imgs, scene_graph=scenegraph_type, prefilter=None, symmetrize=True)
-output = inference(pairs,
+output = inference(pairs, mast3r, device, batch_size=1, verbose=not silent)
 mode = GlobalAlignerMode.PointCloudOptimizer if len(imgs) > 2 else GlobalAlignerMode.PairViewer
 scene = global_aligner(output, cog_seg_maps, rev_cog_seg_maps, cog_feats, device=device, mode=mode, verbose=not silent)
 ori_imgs = scene.ori_imgs
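In short, the commit replaces the monolithic PE3R model bundle with a directly loaded MASt3R checkpoint and passes that model into each inference call. Below is a minimal sketch of the resulting initialization path, assuming the surrounding app.py pipeline (make_pairs, inference, global_aligner) stays as shown in the unchanged context lines; it is illustrative, not a verbatim excerpt of the file.

    import torch
    from modules.mast3r.model import AsymmetricMASt3R

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Load MASt3R from the Hugging Face checkpoint instead of building the
    # full PE3R model bundle (the old `pe3r = Models(device)` line).
    MAST3R_CKP = 'naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric'
    mast3r = AsymmetricMASt3R.from_pretrained(MAST3R_CKP).to(device)

    # Each reconstruction pass then hands the model to inference explicitly:
    #   output = inference(pairs, mast3r, device, batch_size=1, verbose=not silent)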