Spaces:
Running
on
Zero
Running
on
Zero
Commit
·
75eb903
1
Parent(s):
9f38f01
remove video generation
Browse files
app.py
CHANGED
@@ -217,7 +217,7 @@ def _make3d(output_queue: SimpleQueue, images: Image.Image):
|
|
217 |
print(f'type(images)={type(images)}')
|
218 |
global model
|
219 |
if IS_FLEXICUBES:
|
220 |
-
model.init_flexicubes_geometry(device, use_renderer=True)
|
221 |
model = model.eval()
|
222 |
|
223 |
images = np.asarray(images, dtype=np.float32) / 255.0
|
@@ -245,30 +245,27 @@ def _make3d(output_queue: SimpleQueue, images: Image.Image):
|
|
245 |
planes = model.forward_planes(images, input_cameras)
|
246 |
print(f'type(planes)={type(planes)}')
|
247 |
|
248 |
-
#
|
249 |
-
chunk_size = 20 if IS_FLEXICUBES else 1
|
250 |
-
render_size = 384
|
251 |
|
252 |
-
print(f'IS_FLEXICUBES={IS_FLEXICUBES}')
|
253 |
|
254 |
# frames = []
|
255 |
-
for i in tqdm(range(0, render_cameras.shape[1], chunk_size)):
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
output_queue.put(("log", "3dvideo", rr.Image(frame)))
|
271 |
-
# frames.append(frame)
|
272 |
|
273 |
# frames = torch.cat(frames, dim=1)
|
274 |
|
|
|
217 |
print(f'type(images)={type(images)}')
|
218 |
global model
|
219 |
if IS_FLEXICUBES:
|
220 |
+
model.init_flexicubes_geometry(device, use_renderer=False)
|
221 |
model = model.eval()
|
222 |
|
223 |
images = np.asarray(images, dtype=np.float32) / 255.0
|
|
|
245 |
planes = model.forward_planes(images, input_cameras)
|
246 |
print(f'type(planes)={type(planes)}')
|
247 |
|
248 |
+
# get video
|
249 |
+
# chunk_size = 20 if IS_FLEXICUBES else 1
|
250 |
+
# render_size = 384
|
251 |
|
|
|
252 |
|
253 |
# frames = []
|
254 |
+
# for i in tqdm(range(0, render_cameras.shape[1], chunk_size)):
|
255 |
+
# if IS_FLEXICUBES:
|
256 |
+
# frame = model.forward_geometry(
|
257 |
+
# planes,
|
258 |
+
# render_cameras[:, i:i+chunk_size],
|
259 |
+
# render_size=render_size,
|
260 |
+
# )['img']
|
261 |
+
# else:
|
262 |
+
# frame = model.synthesizer(
|
263 |
+
# planes,
|
264 |
+
# cameras=render_cameras[:, i:i+chunk_size],
|
265 |
+
# render_size=render_size,
|
266 |
+
# )['images_rgb']
|
267 |
+
|
268 |
+
# frames.append(frame)
|
|
|
|
|
269 |
|
270 |
# frames = torch.cat(frames, dim=1)
|
271 |
|