init project
app.py CHANGED
@@ -11,7 +11,6 @@ sys.path.append(os.path.abspath('./modules'))
 import math
 import tempfile
 import gradio
-import os
 import torch
 import spaces
 import numpy as np
@@ -44,395 +43,395 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu'
 pe3r = Models(device)
 
 
-def _convert_scene_output_to_glb(outdir, imgs, pts3d, mask, focals, cams2world, cam_size=0.05,
-                                 cam_color=None, as_pointcloud=False,
-                                 transparent_cams=False, silent=False):
-    assert len(pts3d) == len(mask) <= len(imgs) <= len(cams2world) == len(focals)
-    pts3d = to_numpy(pts3d)
-    imgs = to_numpy(imgs)
-    focals = to_numpy(focals)
-    cams2world = to_numpy(cams2world)
-
-    scene = trimesh.Scene()
-
-    # full pointcloud
-    if as_pointcloud:
-        pts = np.concatenate([p[m] for p, m in zip(pts3d, mask)])
-        col = np.concatenate([p[m] for p, m in zip(imgs, mask)])
-        pct = trimesh.PointCloud(pts.reshape(-1, 3), colors=col.reshape(-1, 3))
-        scene.add_geometry(pct)
-    else:
-        meshes = []
-        for i in range(len(imgs)):
-            meshes.append(pts3d_to_trimesh(imgs[i], pts3d[i], mask[i]))
-        mesh = trimesh.Trimesh(**cat_meshes(meshes))
-        scene.add_geometry(mesh)
-
-    # add each camera
-    for i, pose_c2w in enumerate(cams2world):
-        if isinstance(cam_color, list):
-            camera_edge_color = cam_color[i]
-        else:
-            camera_edge_color = cam_color or CAM_COLORS[i % len(CAM_COLORS)]
-        add_scene_cam(scene, pose_c2w, camera_edge_color,
-                      None if transparent_cams else imgs[i], focals[i],
-                      imsize=imgs[i].shape[1::-1], screen_width=cam_size)
-
-    rot = np.eye(4)
-    rot[:3, :3] = Rotation.from_euler('y', np.deg2rad(180)).as_matrix()
-    scene.apply_transform(np.linalg.inv(cams2world[0] @ OPENGL @ rot))
-    outfile = os.path.join(outdir, 'scene.glb')
-    if not silent:
-        print('(exporting 3D scene to', outfile, ')')
-    scene.export(file_obj=outfile)
-    return outfile
-
-# @spaces.GPU(duration=180)
-def get_3D_model_from_scene(outdir, silent, scene, min_conf_thr=3, as_pointcloud=False, mask_sky=False,
-                            clean_depth=False, transparent_cams=False, cam_size=0.05):
-    """
-    extract 3D_model (glb file) from a reconstructed scene
-    """
-    if scene is None:
-        return None
-    # post processes
-    if clean_depth:
-        scene = scene.clean_pointcloud()
-    if mask_sky:
-        scene = scene.mask_sky()
-
-    # get optimized values from scene
-    rgbimg = scene.ori_imgs
-    focals = scene.get_focals().cpu()
-    cams2world = scene.get_im_poses().cpu()
-    # 3D pointcloud from depthmap, poses and intrinsics
-    pts3d = to_numpy(scene.get_pts3d())
-    scene.min_conf_thr = float(scene.conf_trf(torch.tensor(min_conf_thr)))
-    msk = to_numpy(scene.get_masks())
-    return _convert_scene_output_to_glb(outdir, rgbimg, pts3d, msk, focals, cams2world, as_pointcloud=as_pointcloud,
-                                        transparent_cams=transparent_cams, cam_size=cam_size, silent=silent)
-
-def mask_nms(masks, threshold=0.8):
-    keep = []
-    mask_num = len(masks)
-    suppressed = np.zeros((mask_num), dtype=np.int64)
-    for i in range(mask_num):
-        if suppressed[i] == 1:
-            continue
-        keep.append(i)
-        for j in range(i + 1, mask_num):
-            if suppressed[j] == 1:
-                continue
-            intersection = (masks[i] & masks[j]).sum()
-            if min(intersection / masks[i].sum(), intersection / masks[j].sum()) > threshold:
-                suppressed[j] = 1
-    return keep
-
-def filter(masks, keep):
-    ret = []
-    for i, m in enumerate(masks):
-        if i in keep: ret.append(m)
-    return ret
-
-def mask_to_box(mask):
-    if mask.sum() == 0:
-        return np.array([0, 0, 0, 0])
-
-    # Get the rows and columns where the mask is 1
-    rows = np.any(mask, axis=1)
-    cols = np.any(mask, axis=0)
-
-    # Get top, bottom, left, right edges
-    top = np.argmax(rows)
-    bottom = len(rows) - 1 - np.argmax(np.flip(rows))
-    left = np.argmax(cols)
-    right = len(cols) - 1 - np.argmax(np.flip(cols))
-
-    return np.array([left, top, right, bottom])
-
-def box_xyxy_to_xywh(box_xyxy):
-    box_xywh = deepcopy(box_xyxy)
-    box_xywh[2] = box_xywh[2] - box_xywh[0]
-    box_xywh[3] = box_xywh[3] - box_xywh[1]
-    return box_xywh
-
-def get_seg_img(mask, box, image):
-    image = image.copy()
-    x, y, w, h = box
-    # image[mask == 0] = np.array([0, 0, 0], dtype=np.uint8)
-    box_area = w * h
-    mask_area = mask.sum()
-    if 1 - (mask_area / box_area) < 0.2:
-        image[mask == 0] = np.array([0, 0, 0], dtype=np.uint8)
-    else:
-        random_values = np.random.randint(0, 255, size=image.shape, dtype=np.uint8)
-        image[mask == 0] = random_values[mask == 0]
-    seg_img = image[y:y+h, x:x+w, ...]
-    return seg_img
-
-def pad_img(img):
-    h, w, _ = img.shape
-    l = max(w,h)
-    pad = np.zeros((l,l,3), dtype=np.uint8) #
-    if h > w:
-        pad[:,(h-w)//2:(h-w)//2 + w, :] = img
-    else:
-        pad[(w-h)//2:(w-h)//2 + h, :, :] = img
-    return pad
-
-def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
-    assert len(args) > 0 and all(
-        len(a) == len(args[0]) for a in args
-    ), "Batched iteration must have inputs of all the same size."
-    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
-    for b in range(n_batches):
-        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
-
-def slerp(u1, u2, t):
-    """
-    Perform spherical linear interpolation (Slerp) between two unit vectors.
-
-    Args:
-    - u1 (torch.Tensor): First unit vector, shape (1024,)
-    - u2 (torch.Tensor): Second unit vector, shape (1024,)
-    - t (float): Interpolation parameter
-
-    Returns:
-    - torch.Tensor: Interpolated vector, shape (1024,)
-    """
-    # Compute the dot product
-    dot_product = torch.sum(u1 * u2)
-
-    # Ensure the dot product is within the valid range [-1, 1]
-    dot_product = torch.clamp(dot_product, -1.0, 1.0)
-
-    # Compute the angle between the vectors
-    theta = torch.acos(dot_product)
-
-    # Compute the coefficients for the interpolation
-    sin_theta = torch.sin(theta)
-    if sin_theta == 0:
-        # Vectors are parallel, return a linear interpolation
-        return u1 + t * (u2 - u1)
-
-    s1 = torch.sin((1 - t) * theta) / sin_theta
-    s2 = torch.sin(t * theta) / sin_theta
-
-    # Perform the interpolation
-    return s1 * u1 + s2 * u2
-
-def slerp_multiple(vectors, t_values):
-    """
-    Perform spherical linear interpolation (Slerp) for multiple vectors.
-
-    Args:
-    - vectors (torch.Tensor): Tensor of vectors, shape (n, 1024)
-    - a_values (torch.Tensor): Tensor of values corresponding to each vector, shape (n,)
-
-    Returns:
-    - torch.Tensor: Interpolated vector, shape (1024,)
-    """
-    n = vectors.shape[0]
-
-    # Initialize the interpolated vector with the first vector
-    interpolated_vector = vectors[0]
-
-    # Perform Slerp iteratively
-    for i in range(1, n):
-        # Perform Slerp between the current interpolated vector and the next vector
-        t = t_values[i] / (t_values[i] + t_values[i-1])
-        interpolated_vector = slerp(interpolated_vector, vectors[i], t)
-
-    return interpolated_vector
-
-@torch.no_grad
-def get_mask_from_img_sam1(mobilesamv2, yolov8, sam1_image, yolov8_image, original_size, input_size, transform, device):
-    sam_mask=[]
-    img_area = original_size[0] * original_size[1]
-
-    obj_results = yolov8(yolov8_image,device=device,retina_masks=False,imgsz=1024,conf=0.25,iou=0.95,verbose=False)
-    input_boxes1 = obj_results[0].boxes.xyxy
-    input_boxes1 = input_boxes1.cpu().numpy()
-    input_boxes1 = transform.apply_boxes(input_boxes1, original_size)
-    input_boxes = torch.from_numpy(input_boxes1).to(device)
-
-    # obj_results = yolov8(yolov8_image,device=device,retina_masks=False,imgsz=512,conf=0.25,iou=0.9,verbose=False)
-    # input_boxes2 = obj_results[0].boxes.xyxy
-    # input_boxes2 = input_boxes2.cpu().numpy()
-    # input_boxes2 = transform.apply_boxes(input_boxes2, original_size)
-    # input_boxes2 = torch.from_numpy(input_boxes2).to(device)
-
-    # input_boxes = torch.cat((input_boxes1, input_boxes2), dim=0)
-
-    input_image = mobilesamv2.preprocess(sam1_image)
-    image_embedding = mobilesamv2.image_encoder(input_image)['last_hidden_state']
-
-    image_embedding=torch.repeat_interleave(image_embedding, 320, dim=0)
-    prompt_embedding=mobilesamv2.prompt_encoder.get_dense_pe()
-    prompt_embedding=torch.repeat_interleave(prompt_embedding, 320, dim=0)
-    for (boxes,) in batch_iterator(320, input_boxes):
-        with torch.no_grad():
-            image_embedding=image_embedding[0:boxes.shape[0],:,:,:]
-            prompt_embedding=prompt_embedding[0:boxes.shape[0],:,:,:]
-            sparse_embeddings, dense_embeddings = mobilesamv2.prompt_encoder(
-                points=None,
-                boxes=boxes,
-                masks=None,)
-            low_res_masks, _ = mobilesamv2.mask_decoder(
-                image_embeddings=image_embedding,
-                image_pe=prompt_embedding,
-                sparse_prompt_embeddings=sparse_embeddings,
-                dense_prompt_embeddings=dense_embeddings,
-                multimask_output=False,
-                simple_type=True,
-            )
-            low_res_masks=mobilesamv2.postprocess_masks(low_res_masks, input_size, original_size)
-            sam_mask_pre = (low_res_masks > mobilesamv2.mask_threshold)
-            for mask in sam_mask_pre:
-                if mask.sum() / img_area > 0.002:
-                    sam_mask.append(mask.squeeze(1))
-    sam_mask=torch.cat(sam_mask)
-    sorted_sam_mask = sorted(sam_mask, key=(lambda x: x.sum()), reverse=True)
-    keep = mask_nms(sorted_sam_mask)
-    ret_mask = filter(sorted_sam_mask, keep)
-
-    return ret_mask
-
-@torch.no_grad
-def get_cog_feats(images, device):
-    cog_seg_maps = []
-    rev_cog_seg_maps = []
-    inference_state = pe3r.sam2.init_state(images=images.sam2_images, video_height=images.sam2_video_size[0], video_width=images.sam2_video_size[1])
-    mask_num = 0
-
-    sam1_images = images.sam1_images
-    sam1_images_size = images.sam1_images_size
-    np_images = images.np_images
-    np_images_size = images.np_images_size
-
-    sam1_masks = get_mask_from_img_sam1(pe3r.mobilesamv2, pe3r.yolov8, sam1_images[0], np_images[0], np_images_size[0], sam1_images_size[0], images.sam1_transform, device)
-    for mask in sam1_masks:
-        _, _, _ = pe3r.sam2.add_new_mask(
-            inference_state=inference_state,
-            frame_idx=0,
-            obj_id=mask_num,
-            mask=mask,
-        )
-        mask_num += 1
-
-    video_segments = {} # video_segments contains the per-frame segmentation results
-    for out_frame_idx, out_obj_ids, out_mask_logits in pe3r.sam2.propagate_in_video(inference_state):
-        sam2_masks = (out_mask_logits > 0.0).squeeze(1)
-
-        video_segments[out_frame_idx] = {
-            out_obj_id: sam2_masks[i].cpu().numpy()
-            for i, out_obj_id in enumerate(out_obj_ids)
-        }
-
-        if out_frame_idx == 0:
-            continue
-
-        sam1_masks = get_mask_from_img_sam1(pe3r.mobilesamv2, pe3r.yolov8, sam1_images[out_frame_idx], np_images[out_frame_idx], np_images_size[out_frame_idx], sam1_images_size[out_frame_idx], images.sam1_transform, device)
-
-        for sam1_mask in sam1_masks:
-            flg = 1
-            for sam2_mask in sam2_masks:
-                # print(sam1_mask.shape, sam2_mask.shape)
-                area1 = sam1_mask.sum()
-                area2 = sam2_mask.sum()
-                intersection = (sam1_mask & sam2_mask).sum()
-                if min(intersection / area1, intersection / area2) > 0.25:
-                    flg = 0
-                    break
-            if flg:
-                video_segments[out_frame_idx][mask_num] = sam1_mask.cpu().numpy()
-                mask_num += 1
-
-    multi_view_clip_feats = torch.zeros((mask_num+1, 1024))
-    multi_view_clip_feats_map = {}
-    multi_view_clip_area_map = {}
-    for now_frame in range(0, len(video_segments), 1):
-        image = np_images[now_frame]
-
-        seg_img_list = []
-        out_obj_id_list = []
-        out_obj_mask_list = []
-        out_obj_area_list = []
-        # NOTE: background: -1
-        rev_seg_map = -np.ones(image.shape[:2], dtype=np.int64)
-        sorted_dict_items = sorted(video_segments[now_frame].items(), key=lambda x: np.count_nonzero(x[1]), reverse=False)
-        for out_obj_id, mask in sorted_dict_items:
-            if mask.sum() == 0:
-                continue
-            rev_seg_map[mask] = out_obj_id
-        rev_cog_seg_maps.append(rev_seg_map)
-
-        seg_map = -np.ones(image.shape[:2], dtype=np.int64)
-        sorted_dict_items = sorted(video_segments[now_frame].items(), key=lambda x: np.count_nonzero(x[1]), reverse=True)
-        for out_obj_id, mask in sorted_dict_items:
-            if mask.sum() == 0:
-                continue
-            box = np.int32(box_xyxy_to_xywh(mask_to_box(mask)))
-
-            if box[2] == 0 and box[3] == 0:
-                continue
-            # print(box)
-            seg_img = get_seg_img(mask, box, image)
-            pad_seg_img = cv2.resize(pad_img(seg_img), (256,256))
-            seg_img_list.append(pad_seg_img)
-            seg_map[mask] = out_obj_id
-            out_obj_id_list.append(out_obj_id)
-            out_obj_area_list.append(np.count_nonzero(mask))
-            out_obj_mask_list.append(mask)
-
-        if len(seg_img_list) == 0:
-            cog_seg_maps.append(seg_map)
-            continue
-
-        seg_imgs = np.stack(seg_img_list, axis=0) # b,H,W,3
-        seg_imgs = torch.from_numpy(seg_imgs).permute(0,3,1,2) # / 255.0
-
-        inputs = pe3r.siglip_processor(images=seg_imgs, return_tensors="pt")
-        inputs = {key: value.to(device) for key, value in inputs.items()}
-
-        image_features = pe3r.siglip.get_image_features(**inputs)
-        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
-        image_features = image_features.detach().cpu()
-
-        for i in range(len(out_obj_mask_list)):
-            for j in range(i + 1, len(out_obj_mask_list)):
-                mask1 = out_obj_mask_list[i]
-                mask2 = out_obj_mask_list[j]
-                intersection = np.logical_and(mask1, mask2).sum()
-                area1 = out_obj_area_list[i]
-                area2 = out_obj_area_list[j]
-                if min(intersection / area1, intersection / area2) > 0.025:
-                    conf1 = area1 / (area1 + area2)
-                    # conf2 = area2 / (area1 + area2)
-                    image_features[j] = slerp(image_features[j], image_features[i], conf1)
-
-        for i, clip_feat in enumerate(image_features):
-            id = out_obj_id_list[i]
-            if id in multi_view_clip_feats_map.keys():
-                multi_view_clip_feats_map[id].append(clip_feat)
-                multi_view_clip_area_map[id].append(out_obj_area_list[i])
-            else:
-                multi_view_clip_feats_map[id] = [clip_feat]
-                multi_view_clip_area_map[id] = [out_obj_area_list[i]]
-
-        cog_seg_maps.append(seg_map)
-        del image_features
-
-    for i in range(mask_num):
-        if i in multi_view_clip_feats_map.keys():
-            clip_feats = multi_view_clip_feats_map[i]
-            mask_area = multi_view_clip_area_map[i]
-            multi_view_clip_feats[i] = slerp_multiple(torch.stack(clip_feats), np.stack(mask_area))
-        else:
-            multi_view_clip_feats[i] = torch.zeros((1024))
-    multi_view_clip_feats[mask_num] = torch.zeros((1024))
-
-    return cog_seg_maps, rev_cog_seg_maps, multi_view_clip_feats
+# def _convert_scene_output_to_glb(outdir, imgs, pts3d, mask, focals, cams2world, cam_size=0.05,
+#                                  cam_color=None, as_pointcloud=False,
+#                                  transparent_cams=False, silent=False):
+#     assert len(pts3d) == len(mask) <= len(imgs) <= len(cams2world) == len(focals)
+#     pts3d = to_numpy(pts3d)
+#     imgs = to_numpy(imgs)
+#     focals = to_numpy(focals)
+#     cams2world = to_numpy(cams2world)
+#
+#     scene = trimesh.Scene()
+#
+#     # full pointcloud
+#     if as_pointcloud:
+#         pts = np.concatenate([p[m] for p, m in zip(pts3d, mask)])
+#         col = np.concatenate([p[m] for p, m in zip(imgs, mask)])
+#         pct = trimesh.PointCloud(pts.reshape(-1, 3), colors=col.reshape(-1, 3))
+#         scene.add_geometry(pct)
+#     else:
+#         meshes = []
+#         for i in range(len(imgs)):
+#             meshes.append(pts3d_to_trimesh(imgs[i], pts3d[i], mask[i]))
+#         mesh = trimesh.Trimesh(**cat_meshes(meshes))
+#         scene.add_geometry(mesh)
+#
+#     # add each camera
+#     for i, pose_c2w in enumerate(cams2world):
+#         if isinstance(cam_color, list):
+#             camera_edge_color = cam_color[i]
+#         else:
+#             camera_edge_color = cam_color or CAM_COLORS[i % len(CAM_COLORS)]
+#         add_scene_cam(scene, pose_c2w, camera_edge_color,
+#                       None if transparent_cams else imgs[i], focals[i],
+#                       imsize=imgs[i].shape[1::-1], screen_width=cam_size)
+#
+#     rot = np.eye(4)
+#     rot[:3, :3] = Rotation.from_euler('y', np.deg2rad(180)).as_matrix()
+#     scene.apply_transform(np.linalg.inv(cams2world[0] @ OPENGL @ rot))
+#     outfile = os.path.join(outdir, 'scene.glb')
+#     if not silent:
+#         print('(exporting 3D scene to', outfile, ')')
+#     scene.export(file_obj=outfile)
+#     return outfile
+#
+# # @spaces.GPU(duration=180)
+# def get_3D_model_from_scene(outdir, silent, scene, min_conf_thr=3, as_pointcloud=False, mask_sky=False,
+#                             clean_depth=False, transparent_cams=False, cam_size=0.05):
+#     """
+#     extract 3D_model (glb file) from a reconstructed scene
+#     """
+#     if scene is None:
+#         return None
+#     # post processes
+#     if clean_depth:
+#         scene = scene.clean_pointcloud()
+#     if mask_sky:
+#         scene = scene.mask_sky()
+#
+#     # get optimized values from scene
+#     rgbimg = scene.ori_imgs
+#     focals = scene.get_focals().cpu()
+#     cams2world = scene.get_im_poses().cpu()
+#     # 3D pointcloud from depthmap, poses and intrinsics
+#     pts3d = to_numpy(scene.get_pts3d())
+#     scene.min_conf_thr = float(scene.conf_trf(torch.tensor(min_conf_thr)))
+#     msk = to_numpy(scene.get_masks())
+#     return _convert_scene_output_to_glb(outdir, rgbimg, pts3d, msk, focals, cams2world, as_pointcloud=as_pointcloud,
+#                                         transparent_cams=transparent_cams, cam_size=cam_size, silent=silent)
+#
+# def mask_nms(masks, threshold=0.8):
+#     keep = []
+#     mask_num = len(masks)
+#     suppressed = np.zeros((mask_num), dtype=np.int64)
+#     for i in range(mask_num):
+#         if suppressed[i] == 1:
+#             continue
+#         keep.append(i)
+#         for j in range(i + 1, mask_num):
+#             if suppressed[j] == 1:
+#                 continue
+#             intersection = (masks[i] & masks[j]).sum()
+#             if min(intersection / masks[i].sum(), intersection / masks[j].sum()) > threshold:
+#                 suppressed[j] = 1
+#     return keep
+#
+# def filter(masks, keep):
+#     ret = []
+#     for i, m in enumerate(masks):
+#         if i in keep: ret.append(m)
+#     return ret
+#
+# def mask_to_box(mask):
+#     if mask.sum() == 0:
+#         return np.array([0, 0, 0, 0])
+
+#     # Get the rows and columns where the mask is 1
+#     rows = np.any(mask, axis=1)
+#     cols = np.any(mask, axis=0)
+
+#     # Get top, bottom, left, right edges
+#     top = np.argmax(rows)
+#     bottom = len(rows) - 1 - np.argmax(np.flip(rows))
+#     left = np.argmax(cols)
+#     right = len(cols) - 1 - np.argmax(np.flip(cols))
+
+#     return np.array([left, top, right, bottom])
+#
+# def box_xyxy_to_xywh(box_xyxy):
+#     box_xywh = deepcopy(box_xyxy)
+#     box_xywh[2] = box_xywh[2] - box_xywh[0]
+#     box_xywh[3] = box_xywh[3] - box_xywh[1]
+#     return box_xywh
+#
+# def get_seg_img(mask, box, image):
+#     image = image.copy()
+#     x, y, w, h = box
+#     # image[mask == 0] = np.array([0, 0, 0], dtype=np.uint8)
+#     box_area = w * h
+#     mask_area = mask.sum()
+#     if 1 - (mask_area / box_area) < 0.2:
+#         image[mask == 0] = np.array([0, 0, 0], dtype=np.uint8)
+#     else:
+#         random_values = np.random.randint(0, 255, size=image.shape, dtype=np.uint8)
+#         image[mask == 0] = random_values[mask == 0]
+#     seg_img = image[y:y+h, x:x+w, ...]
+#     return seg_img
+#
+# def pad_img(img):
+#     h, w, _ = img.shape
+#     l = max(w,h)
+#     pad = np.zeros((l,l,3), dtype=np.uint8) #
+#     if h > w:
+#         pad[:,(h-w)//2:(h-w)//2 + w, :] = img
+#     else:
+#         pad[(w-h)//2:(w-h)//2 + h, :, :] = img
+#     return pad
+#
+# def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
+#     assert len(args) > 0 and all(
+#         len(a) == len(args[0]) for a in args
+#     ), "Batched iteration must have inputs of all the same size."
+#     n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
+#     for b in range(n_batches):
+#         yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
+#
+# def slerp(u1, u2, t):
+#     """
+#     Perform spherical linear interpolation (Slerp) between two unit vectors.
+
+#     Args:
+#     - u1 (torch.Tensor): First unit vector, shape (1024,)
+#     - u2 (torch.Tensor): Second unit vector, shape (1024,)
+#     - t (float): Interpolation parameter
+
+#     Returns:
+#     - torch.Tensor: Interpolated vector, shape (1024,)
+#     """
+#     # Compute the dot product
+#     dot_product = torch.sum(u1 * u2)
+
+#     # Ensure the dot product is within the valid range [-1, 1]
+#     dot_product = torch.clamp(dot_product, -1.0, 1.0)
+
+#     # Compute the angle between the vectors
+#     theta = torch.acos(dot_product)
+
+#     # Compute the coefficients for the interpolation
+#     sin_theta = torch.sin(theta)
+#     if sin_theta == 0:
+#         # Vectors are parallel, return a linear interpolation
+#         return u1 + t * (u2 - u1)
+
+#     s1 = torch.sin((1 - t) * theta) / sin_theta
+#     s2 = torch.sin(t * theta) / sin_theta
+
+#     # Perform the interpolation
+#     return s1 * u1 + s2 * u2
+
+# def slerp_multiple(vectors, t_values):
+#     """
+#     Perform spherical linear interpolation (Slerp) for multiple vectors.
+
+#     Args:
+#     - vectors (torch.Tensor): Tensor of vectors, shape (n, 1024)
+#     - a_values (torch.Tensor): Tensor of values corresponding to each vector, shape (n,)
+
+#     Returns:
+#     - torch.Tensor: Interpolated vector, shape (1024,)
+#     """
+#     n = vectors.shape[0]
+
+#     # Initialize the interpolated vector with the first vector
+#     interpolated_vector = vectors[0]
+
+#     # Perform Slerp iteratively
+#     for i in range(1, n):
+#         # Perform Slerp between the current interpolated vector and the next vector
+#         t = t_values[i] / (t_values[i] + t_values[i-1])
+#         interpolated_vector = slerp(interpolated_vector, vectors[i], t)
+
+#     return interpolated_vector
+#
+# @torch.no_grad
+# def get_mask_from_img_sam1(mobilesamv2, yolov8, sam1_image, yolov8_image, original_size, input_size, transform, device):
+#     sam_mask=[]
+#     img_area = original_size[0] * original_size[1]
+#
+#     obj_results = yolov8(yolov8_image,device=device,retina_masks=False,imgsz=1024,conf=0.25,iou=0.95,verbose=False)
+#     input_boxes1 = obj_results[0].boxes.xyxy
+#     input_boxes1 = input_boxes1.cpu().numpy()
+#     input_boxes1 = transform.apply_boxes(input_boxes1, original_size)
+#     input_boxes = torch.from_numpy(input_boxes1).to(device)
+
+#     # obj_results = yolov8(yolov8_image,device=device,retina_masks=False,imgsz=512,conf=0.25,iou=0.9,verbose=False)
+#     # input_boxes2 = obj_results[0].boxes.xyxy
+#     # input_boxes2 = input_boxes2.cpu().numpy()
+#     # input_boxes2 = transform.apply_boxes(input_boxes2, original_size)
+#     # input_boxes2 = torch.from_numpy(input_boxes2).to(device)
+#
+#     # input_boxes = torch.cat((input_boxes1, input_boxes2), dim=0)
+#
+#     input_image = mobilesamv2.preprocess(sam1_image)
+#     image_embedding = mobilesamv2.image_encoder(input_image)['last_hidden_state']
+#
+#     image_embedding=torch.repeat_interleave(image_embedding, 320, dim=0)
+#     prompt_embedding=mobilesamv2.prompt_encoder.get_dense_pe()
+#     prompt_embedding=torch.repeat_interleave(prompt_embedding, 320, dim=0)
+#     for (boxes,) in batch_iterator(320, input_boxes):
+#         with torch.no_grad():
+#             image_embedding=image_embedding[0:boxes.shape[0],:,:,:]
+#             prompt_embedding=prompt_embedding[0:boxes.shape[0],:,:,:]
+#             sparse_embeddings, dense_embeddings = mobilesamv2.prompt_encoder(
+#                 points=None,
+#                 boxes=boxes,
+#                 masks=None,)
+#             low_res_masks, _ = mobilesamv2.mask_decoder(
+#                 image_embeddings=image_embedding,
+#                 image_pe=prompt_embedding,
+#                 sparse_prompt_embeddings=sparse_embeddings,
+#                 dense_prompt_embeddings=dense_embeddings,
+#                 multimask_output=False,
+#                 simple_type=True,
+#             )
+#             low_res_masks=mobilesamv2.postprocess_masks(low_res_masks, input_size, original_size)
+#             sam_mask_pre = (low_res_masks > mobilesamv2.mask_threshold)
+#             for mask in sam_mask_pre:
+#                 if mask.sum() / img_area > 0.002:
+#                     sam_mask.append(mask.squeeze(1))
+#     sam_mask=torch.cat(sam_mask)
+#     sorted_sam_mask = sorted(sam_mask, key=(lambda x: x.sum()), reverse=True)
+#     keep = mask_nms(sorted_sam_mask)
+#     ret_mask = filter(sorted_sam_mask, keep)
+#
+#     return ret_mask
+#
+# @torch.no_grad
+# def get_cog_feats(images, device):
+#     cog_seg_maps = []
+#     rev_cog_seg_maps = []
+#     inference_state = pe3r.sam2.init_state(images=images.sam2_images, video_height=images.sam2_video_size[0], video_width=images.sam2_video_size[1])
+#     mask_num = 0
+#
+#     sam1_images = images.sam1_images
+#     sam1_images_size = images.sam1_images_size
+#     np_images = images.np_images
+#     np_images_size = images.np_images_size
+
+#     sam1_masks = get_mask_from_img_sam1(pe3r.mobilesamv2, pe3r.yolov8, sam1_images[0], np_images[0], np_images_size[0], sam1_images_size[0], images.sam1_transform, device)
+#     for mask in sam1_masks:
+#         _, _, _ = pe3r.sam2.add_new_mask(
+#             inference_state=inference_state,
+#             frame_idx=0,
+#             obj_id=mask_num,
+#             mask=mask,
+#         )
+#         mask_num += 1
+#
+#     video_segments = {} # video_segments contains the per-frame segmentation results
+#     for out_frame_idx, out_obj_ids, out_mask_logits in pe3r.sam2.propagate_in_video(inference_state):
+#         sam2_masks = (out_mask_logits > 0.0).squeeze(1)
+#
+#         video_segments[out_frame_idx] = {
+#             out_obj_id: sam2_masks[i].cpu().numpy()
+#             for i, out_obj_id in enumerate(out_obj_ids)
+#         }
+#
+#         if out_frame_idx == 0:
+#             continue
+#
+#         sam1_masks = get_mask_from_img_sam1(pe3r.mobilesamv2, pe3r.yolov8, sam1_images[out_frame_idx], np_images[out_frame_idx], np_images_size[out_frame_idx], sam1_images_size[out_frame_idx], images.sam1_transform, device)
+#
+#         for sam1_mask in sam1_masks:
+#             flg = 1
+#             for sam2_mask in sam2_masks:
+#                 # print(sam1_mask.shape, sam2_mask.shape)
+#                 area1 = sam1_mask.sum()
+#                 area2 = sam2_mask.sum()
+#                 intersection = (sam1_mask & sam2_mask).sum()
+#                 if min(intersection / area1, intersection / area2) > 0.25:
+#                     flg = 0
+#                     break
+#             if flg:
+#                 video_segments[out_frame_idx][mask_num] = sam1_mask.cpu().numpy()
+#                 mask_num += 1
+#
+#     multi_view_clip_feats = torch.zeros((mask_num+1, 1024))
+#     multi_view_clip_feats_map = {}
+#     multi_view_clip_area_map = {}
+#     for now_frame in range(0, len(video_segments), 1):
+#         image = np_images[now_frame]
+#
+#         seg_img_list = []
+#         out_obj_id_list = []
+#         out_obj_mask_list = []
+#         out_obj_area_list = []
+#         # NOTE: background: -1
+#         rev_seg_map = -np.ones(image.shape[:2], dtype=np.int64)
+#         sorted_dict_items = sorted(video_segments[now_frame].items(), key=lambda x: np.count_nonzero(x[1]), reverse=False)
+#         for out_obj_id, mask in sorted_dict_items:
+#             if mask.sum() == 0:
+#                 continue
+#             rev_seg_map[mask] = out_obj_id
+#         rev_cog_seg_maps.append(rev_seg_map)
+#
+#         seg_map = -np.ones(image.shape[:2], dtype=np.int64)
+#         sorted_dict_items = sorted(video_segments[now_frame].items(), key=lambda x: np.count_nonzero(x[1]), reverse=True)
+#         for out_obj_id, mask in sorted_dict_items:
+#             if mask.sum() == 0:
+#                 continue
+#             box = np.int32(box_xyxy_to_xywh(mask_to_box(mask)))
+
+#             if box[2] == 0 and box[3] == 0:
+#                 continue
+#             # print(box)
+#             seg_img = get_seg_img(mask, box, image)
+#             pad_seg_img = cv2.resize(pad_img(seg_img), (256,256))
+#             seg_img_list.append(pad_seg_img)
+#             seg_map[mask] = out_obj_id
+#             out_obj_id_list.append(out_obj_id)
+#             out_obj_area_list.append(np.count_nonzero(mask))
+#             out_obj_mask_list.append(mask)
+#
+#         if len(seg_img_list) == 0:
+#             cog_seg_maps.append(seg_map)
+#             continue
+#
+#         seg_imgs = np.stack(seg_img_list, axis=0) # b,H,W,3
+#         seg_imgs = torch.from_numpy(seg_imgs).permute(0,3,1,2) # / 255.0
+
+#         inputs = pe3r.siglip_processor(images=seg_imgs, return_tensors="pt")
+#         inputs = {key: value.to(device) for key, value in inputs.items()}
+
+#         image_features = pe3r.siglip.get_image_features(**inputs)
+#         image_features = image_features / image_features.norm(dim=-1, keepdim=True)
+#         image_features = image_features.detach().cpu()
+#
+#         for i in range(len(out_obj_mask_list)):
+#             for j in range(i + 1, len(out_obj_mask_list)):
+#                 mask1 = out_obj_mask_list[i]
+#                 mask2 = out_obj_mask_list[j]
+#                 intersection = np.logical_and(mask1, mask2).sum()
+#                 area1 = out_obj_area_list[i]
+#                 area2 = out_obj_area_list[j]
+#                 if min(intersection / area1, intersection / area2) > 0.025:
+#                     conf1 = area1 / (area1 + area2)
+#                     # conf2 = area2 / (area1 + area2)
+#                     image_features[j] = slerp(image_features[j], image_features[i], conf1)
+#
+#         for i, clip_feat in enumerate(image_features):
+#             id = out_obj_id_list[i]
+#             if id in multi_view_clip_feats_map.keys():
+#                 multi_view_clip_feats_map[id].append(clip_feat)
+#                 multi_view_clip_area_map[id].append(out_obj_area_list[i])
+#             else:
+#                 multi_view_clip_feats_map[id] = [clip_feat]
+#                 multi_view_clip_area_map[id] = [out_obj_area_list[i]]
+#
+#         cog_seg_maps.append(seg_map)
+#         del image_features
+
+#     for i in range(mask_num):
+#         if i in multi_view_clip_feats_map.keys():
+#             clip_feats = multi_view_clip_feats_map[i]
+#             mask_area = multi_view_clip_area_map[i]
+#             multi_view_clip_feats[i] = slerp_multiple(torch.stack(clip_feats), np.stack(mask_area))
+#         else:
+#             multi_view_clip_feats[i] = torch.zeros((1024))
+#     multi_view_clip_feats[mask_num] = torch.zeros((1024))
+
+#     return cog_seg_maps, rev_cog_seg_maps, multi_view_clip_feats
 
 @spaces.GPU(duration=180)
 def get_reconstructed_scene(outdir, device, silent, filelist, schedule, niter, min_conf_thr,
@@ -520,21 +519,21 @@ def get_reconstructed_scene(outdir, device, silent, filelist, schedule, niter, m
 
     # return scene, outfile, imgs
 
-@spaces.GPU(duration=180)
-def get_3D_object_from_scene(outdir, pe3r, silent, device, text, threshold, scene, min_conf_thr, as_pointcloud,
-                             mask_sky, clean_depth, transparent_cams, cam_size):
+# @spaces.GPU(duration=180)
+# def get_3D_object_from_scene(outdir, pe3r, silent, device, text, threshold, scene, min_conf_thr, as_pointcloud,
+#                              mask_sky, clean_depth, transparent_cams, cam_size):
 
-    texts = [text]
-    inputs = pe3r.siglip_tokenizer(text=texts, padding="max_length", return_tensors="pt")
-    inputs = {key: value.to(device) for key, value in inputs.items()}
-    with torch.no_grad():
-        text_feats =pe3r.siglip.get_text_features(**inputs)
-        text_feats = text_feats / text_feats.norm(dim=-1, keepdim=True)
-    scene.render_image(text_feats, threshold)
-    scene.ori_imgs = scene.rendered_imgs
-    outfile = get_3D_model_from_scene(outdir, silent, scene, min_conf_thr, as_pointcloud, mask_sky,
-                                      clean_depth, transparent_cams, cam_size)
-    return outfile
+#     texts = [text]
+#     inputs = pe3r.siglip_tokenizer(text=texts, padding="max_length", return_tensors="pt")
+#     inputs = {key: value.to(device) for key, value in inputs.items()}
+#     with torch.no_grad():
+#         text_feats =pe3r.siglip.get_text_features(**inputs)
+#         text_feats = text_feats / text_feats.norm(dim=-1, keepdim=True)
+#     scene.render_image(text_feats, threshold)
+#     scene.ori_imgs = scene.rendered_imgs
+#     outfile = get_3D_model_from_scene(outdir, silent, scene, min_conf_thr, as_pointcloud, mask_sky,
+#                                       clean_depth, transparent_cams, cam_size)
+#     return outfile
 
 
 def set_scenegraph_options(inputfiles, winsize, refid, scenegraph_type):
@@ -560,8 +559,8 @@ def set_scenegraph_options(inputfiles, winsize, refid, scenegraph_type):
 
 with tempfile.TemporaryDirectory(suffix='pe3r_gradio_demo') as tmpdirname:
     recon_fun = functools.partial(get_reconstructed_scene, tmpdirname, pe3r, device, silent)
-    model_from_scene_fun = functools.partial(get_3D_model_from_scene, tmpdirname, silent)
-    get_3D_object_from_scene_fun = functools.partial(get_3D_object_from_scene, tmpdirname, pe3r, silent, device)
+    # model_from_scene_fun = functools.partial(get_3D_model_from_scene, tmpdirname, silent)
+    # get_3D_object_from_scene_fun = functools.partial(get_3D_object_from_scene, tmpdirname, pe3r, silent, device)
 
     with gradio.Blocks(css=""".gradio-container {margin: 0 !important; min-width: 100%};""", title="PE3R Demo") as demo:
         # scene state is save so that you can change conf_thr, cam_size... without rerunning the inference
@@ -623,32 +622,32 @@ with tempfile.TemporaryDirectory(suffix='pe3r_gradio_demo') as tmpdirname:
                               mask_sky, clean_depth, transparent_cams, cam_size,
                               scenegraph_type, winsize, refid],
                       outputs=[scene, outmodel, outgallery])
-        min_conf_thr.release(fn=model_from_scene_fun,
-                             inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
-                                     clean_depth, transparent_cams, cam_size],
-                             outputs=outmodel)
-        cam_size.change(fn=model_from_scene_fun,
-                        inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
-                                clean_depth, transparent_cams, cam_size],
-                        outputs=outmodel)
-        as_pointcloud.change(fn=model_from_scene_fun,
-                             inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
-                                     clean_depth, transparent_cams, cam_size],
-                             outputs=outmodel)
-        mask_sky.change(fn=model_from_scene_fun,
-                        inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
-                                clean_depth, transparent_cams, cam_size],
-                        outputs=outmodel)
-        clean_depth.change(fn=model_from_scene_fun,
-                           inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
-                                   clean_depth, transparent_cams, cam_size],
-                           outputs=outmodel)
-        transparent_cams.change(model_from_scene_fun,
-                                inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
-                                        clean_depth, transparent_cams, cam_size],
-                                outputs=outmodel)
-        find_btn.click(fn=get_3D_object_from_scene_fun,
-                       inputs=[text_input, threshold, scene, min_conf_thr, as_pointcloud, mask_sky,
-                               clean_depth, transparent_cams, cam_size],
-                       outputs=outmodel)
+        # min_conf_thr.release(fn=model_from_scene_fun,
+        #                      inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                              clean_depth, transparent_cams, cam_size],
+        #                      outputs=outmodel)
+        # cam_size.change(fn=model_from_scene_fun,
+        #                 inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                         clean_depth, transparent_cams, cam_size],
+        #                 outputs=outmodel)
+        # as_pointcloud.change(fn=model_from_scene_fun,
+        #                      inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                              clean_depth, transparent_cams, cam_size],
+        #                      outputs=outmodel)
+        # mask_sky.change(fn=model_from_scene_fun,
+        #                 inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                         clean_depth, transparent_cams, cam_size],
+        #                 outputs=outmodel)
+        # clean_depth.change(fn=model_from_scene_fun,
+        #                    inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                            clean_depth, transparent_cams, cam_size],
+        #                    outputs=outmodel)
+        # transparent_cams.change(model_from_scene_fun,
+        #                         inputs=[scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                                 clean_depth, transparent_cams, cam_size],
+        #                         outputs=outmodel)
+        # find_btn.click(fn=get_3D_object_from_scene_fun,
+        #                inputs=[text_input, threshold, scene, min_conf_thr, as_pointcloud, mask_sky,
+        #                        clean_depth, transparent_cams, cam_size],
+        #                outputs=outmodel)
     demo.launch(show_error=True, share=None, server_name=None, server_port=None)
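
For reference, the slerp helper that this commit comments out is a standard spherical linear interpolation; a minimal standalone sketch of its behavior (the function body is copied from the diff above, the test inputs are illustrative):

import torch

def slerp(u1, u2, t):
    # Spherical linear interpolation between two unit vectors
    # (logic copied from the commented-out helper in app.py).
    dot_product = torch.sum(u1 * u2)
    dot_product = torch.clamp(dot_product, -1.0, 1.0)
    theta = torch.acos(dot_product)
    sin_theta = torch.sin(theta)
    if sin_theta == 0:
        # Vectors are parallel, return a linear interpolation
        return u1 + t * (u2 - u1)
    s1 = torch.sin((1 - t) * theta) / sin_theta
    s2 = torch.sin(t * theta) / sin_theta
    return s1 * u1 + s2 * u2

# Illustrative inputs: two random unit vectors with the 1024-dim shape
# the app uses for its SigLIP features.
u1 = torch.nn.functional.normalize(torch.randn(1024), dim=0)
u2 = torch.nn.functional.normalize(torch.randn(1024), dim=0)
mid = slerp(u1, u2, 0.5)
print(mid.norm())  # ~1.0: the result stays on the unit sphere

Unlike a plain weighted average such as 0.5 * (u1 + u2), the interpolated feature keeps unit norm, which appears to be why the app merges overlapping normalized SigLIP features with slerp rather than a mean.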
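Similarly, the commented-out mask_nms performs a greedy suppression of near-duplicate segmentation masks based on their mutual overlap ratio; a small self-contained check (function copied from the diff above, the tiny 4x4 masks are illustrative):

import numpy as np

def mask_nms(masks, threshold=0.8):
    # Greedy NMS over binary masks: keep a mask, then suppress any later mask
    # whose overlap ratio with it exceeds `threshold`
    # (logic copied from the commented-out helper in app.py).
    keep = []
    mask_num = len(masks)
    suppressed = np.zeros((mask_num), dtype=np.int64)
    for i in range(mask_num):
        if suppressed[i] == 1:
            continue
        keep.append(i)
        for j in range(i + 1, mask_num):
            if suppressed[j] == 1:
                continue
            intersection = (masks[i] & masks[j]).sum()
            if min(intersection / masks[i].sum(), intersection / masks[j].sum()) > threshold:
                suppressed[j] = 1
    return keep

a = np.zeros((4, 4), dtype=bool); a[:2] = True   # top half
b = a.copy()                                     # duplicate of a
c = np.zeros((4, 4), dtype=bool); c[3] = True    # disjoint bottom row
print(mask_nms([a, b, c]))  # [0, 2]: the duplicate b is suppressed

In the app the masks are torch bool tensors pre-sorted by area in descending order before this call, but the suppression logic is the same.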