Spaces: Running on Zero

Update app.py
Browse files

app.py CHANGED

@@ -99,7 +99,9 @@ def predict(_chatbot,task_history,viewer_voxel,viewer_mesh,task_new,seed,top_k,t
             if is_video_file(q[0]):
                 content.append({'video': f'file://{q[0]}'})
             else:
-                content.append({'image': f'file://{q[0]}'})
+                pipeline_image.preprocess_image_white(Image.open(q[0])).save(f"{TMP_DIR}/{trial_id}.png", "png")
+                content.append({'image': f'file://{TMP_DIR}/{trial_id}.png'})
+                #content.append({'image': f'file://{q[0]}'})
         else:
             content.append({'text': q})
         messages.append({'role': 'user', 'content': content})
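
For context, the new branch no longer attaches the raw upload: it first runs the image through the Space's own pipeline_image.preprocess_image_white step, saves the result under TMP_DIR, and points the chat message at that saved PNG via a file:// URI. The helper itself is defined elsewhere in the repo; the sketch below is only a hypothetical stand-in for what a "preprocess to white background" step typically does (the function name flatten_on_white is a placeholder, not the Space's API).

from PIL import Image

def flatten_on_white(path: str, out_path: str) -> str:
    """Hypothetical stand-in for a white-background preprocessing step:
    composite any transparency over plain white and save as PNG."""
    img = Image.open(path).convert("RGBA")
    white = Image.new("RGBA", img.size, (255, 255, 255, 255))
    Image.alpha_composite(white, img).convert("RGB").save(out_path, "PNG")
    return out_path

# Usage mirroring the diff: save under a per-trial temp name, then reference it
# via a file:// URI in the message content (TMP_DIR and trial_id come from app.py).
# flatten_on_white(q[0], f"{TMP_DIR}/{trial_id}.png")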

@@ -284,18 +286,18 @@ def add_text(history, task_history, text,task_new):
 def add_file(history, task_history, file, task_new, fig, query):
     if file.name.endswith(('.obj', '.glb')):
         position_recon = load_vertices(file.name)#(N,3)
-        coords
-        ss
+        coords = ((torch.from_numpy(position_recon) + 0.5) * 64).int().contiguous()
+        ss = torch.zeros(1, 64, 64, 64, dtype=torch.long)
         ss[:, coords[:, 0], coords[:, 1], coords[:, 2]] = 1
-        token
-        token
-        words
-        fig
+        token = vqvae.Encode(ss.to(dtype=torch.float32).unsqueeze(0).to("cuda"))
+        token = token[0].cpu().numpy().tolist()
+        words = token_to_words(token)
+        fig = make_pointcloud_figure(position_recon,rotate=True)
         return history, task_history,file.name,task_new,fig,gr.update(
-            value=f"{words}\nGive a quick overview of the object represented by this 3D mesh.")
-        history
+            value= f"{words}\nGive a quick overview of the object represented by this 3D mesh.")
+    history = history if history is not None else []
     task_history = task_history if task_history is not None else []
-    history
+    history = history + [((file.name,), None)]
     task_history = task_history + [((file.name,), None)]
     task_new = task_new + [((file.name,), None)]
     return history, task_history, file.name, task_new, fig, query
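
The core of this hunk is the new mesh-to-token path: the loaded vertices are snapped into a 64x64x64 occupancy grid, the grid is encoded by the Space's VQ-VAE into discrete token ids, and token_to_words turns those ids into the text that is prepended to the "Give a quick overview..." prompt. Below is a minimal sketch of just the voxelisation step, assuming (as the + 0.5 shift suggests) that load_vertices returns coordinates normalised to roughly [-0.5, 0.5]; the clamp is my own safety addition and is not in the diff.

import numpy as np
import torch

def vertices_to_occupancy(position_recon: np.ndarray, resolution: int = 64) -> torch.Tensor:
    """Sketch of the voxelisation in add_file: map (N, 3) vertices assumed to lie
    in roughly [-0.5, 0.5] onto a resolution^3 occupancy grid of 0/1 values."""
    coords = ((torch.from_numpy(position_recon) + 0.5) * resolution).int()
    # The diff indexes the grid directly; clamping guards against vertices
    # sitting exactly on the +0.5 boundary (extra safety, not in app.py).
    coords = coords.clamp_(0, resolution - 1).contiguous()
    ss = torch.zeros(1, resolution, resolution, resolution, dtype=torch.long)
    ss[:, coords[:, 0], coords[:, 1], coords[:, 2]] = 1
    return ss

# In the diff this grid is then fed to the Space's VQ-VAE as float input,
# vqvae.Encode(ss.to(dtype=torch.float32).unsqueeze(0).to("cuda")),
# and the resulting token ids become prompt text via token_to_words().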

@@ -404,7 +406,7 @@ def add_file2(history, task_history, file,task_new):
     history = history + [((file,), None)]
     task_history = task_history + [((file,), None)]
     task_new = task_new + [((file,), None)]
-    return history, task_history,file,task_new
+    return history, task_history, file, task_new
 
 def _transform_messages(original_messages):
     transformed_messages = []