Commit 4e2aa43 by Amanpreet
1 parent: 440b9db
Files changed:
- VideoToNPZ/gen_skes.py +16 -9
- VideoToNPZ/lib/pose/hrnet/pose_estimation/gen_kpts.py +38 -11
- app.py +159 -79
- convertNPZtoBVH/conver_bvh.py +16 -21
- convertNPZtoBVH/conver_obj.py +23 -81
VideoToNPZ/gen_skes.py  (CHANGED)

@@ -37,7 +37,6 @@ adj = adj_mx_from_skeleton(skeleton)
 joints_left, joints_right = [4, 5, 6, 11, 12, 13], [1, 2, 3, 14, 15, 16]
 kps_left, kps_right = [4, 5, 6, 11, 12, 13], [1, 2, 3, 14, 15, 16]
 
-# Set up signal handler for keyboard interrupt
 def signal_handler(sig, frame):
     print("\nInterrupted by user, shutting down...")
     if 'pool' in locals() and pool is not None:

@@ -67,8 +66,13 @@ def generate_skeletons(video=''):
     cap = cv2.VideoCapture(video)
     width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
     height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
 
+    # 2D Keypoint Generation (handled by gen_video_kpts)
+    print('Generating 2D Keypoints:')
+    sys.stdout.flush()
     keypoints, scores = hrnet_pose(video, det_dim=416, gen_output=True)
+
     keypoints, scores, valid_frames = h36m_coco_format(keypoints, scores)
     re_kpts = revise_kpts(keypoints, scores, valid_frames)
     num_person = len(re_kpts)

@@ -78,15 +82,19 @@ def generate_skeletons(video=''):
     pad = (81 - 1) // 2
     causal_shift = 0
 
-
-
+    # 3D Pose Generation
     print('Recording 3D Pose:')
+    print(f"PROGRESS:100.00")  # Start 3D at 100%
+    sys.stdout.flush()
+    total_valid_frames = len(valid_frames) if valid_frames else total_frames
+    prediction = gen_pose(re_kpts, valid_frames, width, height, model_pos, pad, causal_shift)
+    # Simulate 3D progress (replace with gen_pose loop if shared)
+    for i in range(total_valid_frames):
+        progress = 100 + ((i + 1) / total_valid_frames * 100)  # 100-200% for 3D
+        print(f"PROGRESS:{progress:.2f}")
+        sys.stdout.flush()
+        time.sleep(0.01)  # Placeholder; remove if gen_pose has its own loop
 
-    # Add a loading bar
-    for i in tqdm(range(100)):
-        time.sleep(0.01)
-
-    # Create output directory with absolute path
     output_dir = os.path.abspath('../outputs/')
     print(f"Creating output directory: {output_dir}")
     os.makedirs(output_dir, exist_ok=True)

@@ -108,7 +116,6 @@ def arg_parse():
 
 if __name__ == "__main__":
     args = arg_parse()
-    # Use the video path as-is if absolute, otherwise prepend data_root
     if os.path.isabs(args.video):
         video_path = args.video
     else:
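Note: the change above replaces the old cosmetic tqdm loading bar with a machine-readable progress protocol. Each child script prints `PROGRESS:<percent>` to stdout and flushes immediately, and app.py parses those lines to drive the Streamlit progress bar; gen_skes.py deliberately spans 0-200% (2D stage, then 3D stage offset by 100), which the parent rescales. A minimal sketch of the emitting side; `report_progress` is a hypothetical helper, not a function in this repo:

```python
import sys

def report_progress(done, total, offset=0.0):
    # Emit one machine-readable progress line. gen_skes.py effectively uses
    # offset=100 for its 3D stage so the two stages together span 0-200%.
    percent = offset + (done / total) * 100
    print(f"PROGRESS:{percent:.2f}")
    sys.stdout.flush()  # push through the pipe now, not when the buffer fills
```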
VideoToNPZ/lib/pose/hrnet/pose_estimation/gen_kpts.py  (CHANGED)

@@ -174,15 +174,14 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
 
     video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
     print('Recording 2D pose ...')
-
-    from io import StringIO
+    sys.stdout.flush()  # Ensure initial message shows up immediately
 
     if animation:
         # Animation mode uses frame-by-frame processing like in the backup code
         kpts_result = []
         scores_result = []
 
-        for i in …
+        for i in range(video_length):
             ret, frame = cap.read()
             if not ret:
                 break

@@ -192,6 +191,7 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
 
             if bboxs is None or not bboxs.any():
                 print('No person detected!')
+                sys.stdout.flush()
                 continue
 
             # Track people

@@ -248,10 +248,7 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
             if key & 0xFF == ord('q'):
                 break
     else:
-        # Optimized batch processing with Queue
-        old_stdout = sys.stdout
-        sys.stdout = StringIO()
-
+        # Optimized batch processing with Queue (no StringIO redirection)
         frame_queue = mp.Queue(maxsize=batch_size * 2)
         loader_thread = Thread(target=frame_loader, args=(video, frame_queue, video_length))
         loader_thread.start()

@@ -268,11 +265,14 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
             # GPU batch processing
             batch_frames = []
             with torch.no_grad():
-                for …
+                for i in range(video_length):
                     frame = frame_queue.get()
                     if frame is None:
                         break
                     batch_frames.append(frame)
+                    progress = (i + 1) / video_length * 100
+                    print(f"PROGRESS:{progress:.2f}")
+                    sys.stdout.flush()  # Force per-frame update
 
                     if len(batch_frames) >= batch_size:
                         kpts_batch, scores_batch = process_batch(batch_frames, human_model, pose_model,

@@ -283,6 +283,22 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
                             scores_result[frame_idx:frame_idx + 1] = scores[None, :num_person]
                             frame_idx += 1
                         batch_frames = []
+                        progress = (frame_idx / video_length) * 100
+                        print(f"PROGRESS:{progress:.2f}")
+                        sys.stdout.flush()  # Force after batch
+
+                # Process remaining frames
+                if batch_frames:
+                    kpts_batch, scores_batch = process_batch(batch_frames, human_model, pose_model,
+                                                             det_dim, num_person, args.thred_score,
+                                                             use_fp16, device)
+                    for kpts, scores in zip(kpts_batch, scores_batch):
+                        kpts_result[frame_idx:frame_idx + 1] = kpts[None, :num_person]
+                        scores_result[frame_idx:frame_idx + 1] = scores[None, :num_person]
+                        frame_idx += 1
+                    progress = (frame_idx / video_length) * 100
+                    print(f"PROGRESS:{progress:.2f}")
+                    sys.stdout.flush()  # Force after batch
 
             # Process remaining frames
             if batch_frames:

@@ -293,6 +309,9 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
                     kpts_result[frame_idx:frame_idx + 1] = kpts[None, :num_person]
                     scores_result[frame_idx:frame_idx + 1] = scores[None, :num_person]
                     frame_idx += 1
+                    progress = (frame_idx / video_length) * 100
+                    print(f"PROGRESS:{progress:.2f}")
+                    sys.stdout.flush()  # Force final update
         else:
             # CPU batch processing with multiprocessing
             pool = mp.Pool(processes=mp.cpu_count())

@@ -302,11 +321,14 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
 
             batch_frames = []
             with torch.no_grad():
-                for …
+                for i in range(video_length):
                     frame = frame_queue.get()
                     if frame is None:
                         break
                     batch_frames.append(frame)
+                    progress = (i + 1) / video_length * 100
+                    print(f"PROGRESS:{progress:.2f}")
+                    sys.stdout.flush()  # Force per-frame update
                     if len(batch_frames) >= batch_size:
                         kpts_batch, scores_batch = process_func(batch_frames)
                         for kpts, scores in zip(kpts_batch, scores_batch):

@@ -314,6 +336,9 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
                             scores_result[frame_idx:frame_idx + 1] = scores[None, :num_person]
                             frame_idx += 1
                         batch_frames = []
+                        progress = (frame_idx / video_length) * 100
+                        print(f"PROGRESS:{progress:.2f}")
+                        sys.stdout.flush()  # Force after batch
 
             # Process remaining frames
             if batch_frames:

@@ -322,20 +347,22 @@ def gen_video_kpts(video, det_dim=416, num_person=1, gen_output=False, batch_siz
                     kpts_result[frame_idx:frame_idx + 1] = kpts[None, :num_person]
                     scores_result[frame_idx:frame_idx + 1] = scores[None, :num_person]
                     frame_idx += 1
+                    progress = (frame_idx / video_length) * 100
+                    print(f"PROGRESS:{progress:.2f}")
+                    sys.stdout.flush()  # Force final update
 
             pool.close()
             pool.join()
     except KeyboardInterrupt:
         print("\nInterrupted by user, shutting down...")
+        sys.stdout.flush()
         if pool is not None:
             pool.terminate()
             pool.join()
         loader_thread.join()
-        sys.stdout = old_stdout
         sys.exit(0)
 
     loader_thread.join()
-    sys.stdout = old_stdout
 
     if gen_output and kpts_result.any():
         keypoints = kpts_result[:frame_idx].transpose(1, 0, 2, 3)
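Dropping the old `StringIO` redirection matters here: redirecting `sys.stdout` would have swallowed the very `PROGRESS:` lines the parent process now depends on. The batch paths above consume frames from `frame_queue`, which `frame_loader` (defined elsewhere in this file) fills from a background thread. Roughly, and with illustrative details only, the producer side looks like this:

```python
import cv2

def frame_loader(video, frame_queue, video_length):
    # Producer side of the frame queue: decode up to video_length frames and
    # hand them to the batch consumers. A sketch; the repo's version may differ.
    cap = cv2.VideoCapture(video)
    for _ in range(video_length):
        ret, frame = cap.read()
        if not ret:
            break
        frame_queue.put(frame)  # blocks while the queue holds 2 * batch_size frames
    frame_queue.put(None)       # sentinel so the consumer's 'if frame is None: break' fires
    cap.release()
```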
app.py  (CHANGED)

@@ -5,109 +5,187 @@ import os
 import time
 import shutil
 import tempfile
+import threading
+import queue
+from datetime import datetime
+from pathlib import Path
 
-def run_command(command, …
-    st.write(f"Running command: {' '.join(command)}")
+def run_command(command, working_dir, progress_bar, progress_text, step_start_progress, step_weight, show_progress=True):
     try:
-        …
-        …
+        env = os.environ.copy()
+        env["PYTHONUNBUFFERED"] = "1"
+
+        process = subprocess.Popen(
             command,
-            check=True,
             cwd=working_dir,
-            …
-            …
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            bufsize=1,
+            universal_newlines=True,
+            env=env
         )
-        end_time = time.time()
-        execution_time = end_time - start_time
-        st.write(f"{description} completed in {execution_time:.2f} seconds")
-        if process.stdout:
-            st.text(f"Output:\n{process.stdout}")
-        if process.stderr:
-            st.text(f"Warnings:\n{process.stderr}")
-        return True
 
+        stdout_queue = queue.Queue()
+        stderr_queue = queue.Queue()
+
+        def read_output(pipe, q, source):
+            for line in iter(pipe.readline, ''):
+                q.put((source, line))
+            pipe.close()
+
+        stdout_thread = threading.Thread(target=read_output, args=(process.stdout, stdout_queue, 'stdout'))
+        stderr_thread = threading.Thread(target=read_output, args=(process.stderr, stderr_queue, 'stderr'))
+        stdout_thread.daemon = True
+        stderr_thread.daemon = True
+        stdout_thread.start()
+        stderr_thread.start()
+
+        total_progress = step_start_progress
+        stderr_lines = []
+
+        while process.poll() is None or not (stdout_queue.empty() and stderr_queue.empty()):
+            try:
+                source, line = next((q.get_nowait() for q in [stdout_queue, stderr_queue] if not q.empty()), (None, None))
+                if line:
+                    if source == 'stdout':
+                        if show_progress and line.startswith("PROGRESS:"):
+                            try:
+                                progress = float(line.strip().split("PROGRESS:")[1]) / 100
+                                if Path(command[1]).name == 'gen_skes.py':
+                                    # Scale 0-200% to 0-80%
+                                    adjusted_progress = step_start_progress + (progress / 2) * step_weight
+                                else:
+                                    # Scale 0-100% to 20% for conver_bvh.py
+                                    adjusted_progress = step_start_progress + (progress * step_weight)
+                                total_progress = min(adjusted_progress, step_start_progress + step_weight)
+                                progress_bar.progress(total_progress)
+                                progress_text.text(f"{int(total_progress * 100)}%")
+                            except ValueError:
+                                pass
+                    elif source == 'stderr':
+                        stderr_lines.append(line.strip())
+            except queue.Empty:
+                time.sleep(0.01)
+
+        stdout_thread.join()
+        stderr_thread.join()
+
+        if process.returncode != 0:
+            stderr_output = '\n'.join(stderr_lines)
+            st.error(f"Error in {Path(command[1]).name}:\n{stderr_output}")
+            return False
+
+        if show_progress:
+            progress_bar.progress(step_start_progress + step_weight)
+            progress_text.text(f"{int((step_start_progress + step_weight) * 100)}%")
+        return True
 
     except Exception as e:
-        st.error(f"…
+        st.error(f"Exception in {Path(command[1]).name}: {str(e)}")
         return False
 
+def cleanup_output_folder(output_dir, delay=1800):
+    time.sleep(delay)
+    if os.path.exists(output_dir):
+        shutil.rmtree(output_dir, ignore_errors=True)
+        print(f"Deleted temporary output folder after timeout: {output_dir}")
+
 def process_video(video_file):
-    base_dir = …
+    base_dir = Path(__file__).parent.resolve()
 
-    gen_skes_path = os.path.join(base_dir, "VideoToNPZ", "gen_skes.py")
-    convert_obj_path = os.path.join(base_dir, "convertNPZtoBVH", "conver_obj.py")
-    convert_bvh_path = os.path.join(base_dir, "convertNPZtoBVH", "conver_bvh.py")
+    gen_skes_path = base_dir / "VideoToNPZ" / "gen_skes.py"
+    convert_obj_path = base_dir / "convertNPZtoBVH" / "conver_obj.py"
+    convert_bvh_path = base_dir / "convertNPZtoBVH" / "conver_bvh.py"
 
     for script_path in [gen_skes_path, convert_obj_path, convert_bvh_path]:
-        if not …
+        if not script_path.exists():
             st.error(f"Required script not found: {script_path}")
             return None
 
     with tempfile.TemporaryDirectory() as tmp_dir:
-        video_path = …
+        video_path = Path(tmp_dir) / "input_video.mp4"
         with open(video_path, "wb") as f:
             f.write(video_file.read())
 
-        if not …
+        if not video_path.exists():
             st.error(f"Video file not found at: {video_path}")
             return None
+
+        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+        output_dir = base_dir / f"outputs_{timestamp}"
+        output_dir.mkdir(exist_ok=True)
+
+        if not os.access(output_dir, os.W_OK):
+            st.error(f"Cannot write to output directory: {output_dir}")
+            return None
+
+        default_output_dir = base_dir / "outputs"
 
         pipeline_steps = [
-            {
-                …
-                …
-                "working_dir": os.path.dirname(gen_skes_path)
-            },
-            {
-                "command": [sys.executable, convert_obj_path],
-                "description": "Converting to OBJ sequence",
-                "working_dir": os.path.dirname(convert_obj_path)
-            },
-            {
-                "command": [sys.executable, convert_bvh_path],
-                "description": "Converting to BVH",
-                "working_dir": os.path.dirname(convert_bvh_path)
-            }
+            {"command": [sys.executable, str(gen_skes_path), "-v", str(video_path)], "working_dir": gen_skes_path.parent, "weight": 0.8, "show_progress": True},
+            {"command": [sys.executable, str(convert_obj_path), "--output-dir", str(output_dir)], "working_dir": convert_obj_path.parent, "weight": 0.0, "show_progress": False},
+            {"command": [sys.executable, str(convert_bvh_path), "--output-dir", str(output_dir)], "working_dir": convert_bvh_path.parent, "weight": 0.2, "show_progress": True}
         ]
 
-        …
-        status_text = st.empty()
+        progress_bar = st.progress(0.0)
+        progress_text = st.empty()  # Added for percentage display
+        total_progress = 0.0
 
         for i, step in enumerate(pipeline_steps):
-            …
+            success = run_command(
+                step["command"],
+                step["working_dir"],
+                progress_bar,
+                progress_text,
+                total_progress,
+                step["weight"],
+                show_progress=step["show_progress"]
+            )
+            if not success:
+                st.error(f"Failed at step: {' '.join(map(str, step['command']))}")
+                if default_output_dir.exists():
+                    shutil.rmtree(default_output_dir, ignore_errors=True)
+                return None
+
+            if i == 0 and default_output_dir.exists():
+                npz_dir = default_output_dir / "npz"
+                if npz_dir.exists():
+                    target_npz_dir = output_dir / "npz"
+                    shutil.move(str(npz_dir), str(target_npz_dir))
+                if default_output_dir.exists():
+                    shutil.rmtree(default_output_dir, ignore_errors=True)
+
+            total_progress += step["weight"]
+            if step["show_progress"]:
+                progress_bar.progress(min(total_progress, 1.0))
+                progress_text.text(f"{int(total_progress * 100)}%")
+
+        bvh_output_dir = output_dir / "bvh"
+        bvh_file = bvh_output_dir / "output.bvh"
+
+        if bvh_file.exists():
+            cleanup_thread = threading.Thread(target=cleanup_output_folder, args=(output_dir,))
+            cleanup_thread.daemon = True
+            cleanup_thread.start()
+
+            return {
+                'bvh_file': bvh_file,
+                'output_dir': output_dir
+            }
         else:
-            st.error("Failed to generate BVH file")
+            st.error(f"Failed to generate BVH file at: {bvh_file}")
+            if default_output_dir.exists():
+                shutil.rmtree(default_output_dir, ignore_errors=True)
             return None
 
+def cleanup_immediate(output_dir):
+    if output_dir and os.path.exists(output_dir):
+        shutil.rmtree(output_dir, ignore_errors=True)
+        st.success("")
+    else:
+        st.warning("")
+
 def main():
     st.title("Video to BVH Converter")
     st.write("Upload a video file to convert it to BVH format")

@@ -118,25 +196,27 @@ def main():
         st.video(uploaded_file)
 
         if st.button("Convert to BVH"):
-            with st.spinner("Processing video...…
-                …
+            with st.spinner("Processing video..."):
+                conversion_result = process_video(uploaded_file)
 
-            if …
-                with open(bvh_file, "rb") as f:
+            if conversion_result:
+                with open(conversion_result['bvh_file'], "rb") as f:
                     st.download_button(
                         label="Download BVH File",
                         data=f,
                         file_name="output.bvh",
-                        mime="application/octet-stream"
+                        mime="application/octet-stream",
+                        on_click=cleanup_immediate,
+                        args=(conversion_result['output_dir'],)
                     )
-                st.success("Conversion completed successfully!")
+                st.success("Conversion completed successfully! File will be deleted after download or in 30 minutes.")
 
     st.markdown("""
     ### Instructions
    1. Upload a video file using the uploader above
    2. Click 'Convert to BVH' to start the conversion process
-    3. …
-    4. Download the resulting BVH file
+    3. Watch the progress bar as it processes
+    4. Download the resulting BVH file - folder will be deleted immediately after download or after 30 minutes
    """)
 
 if __name__ == "__main__":
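The new run_command sets PYTHONUNBUFFERED=1 and bufsize=1 so the children's PROGRESS lines arrive line by line rather than in multi-kilobyte chunks, and it drains stdout and stderr on daemon threads so a filling stderr pipe cannot deadlock a child. A stripped-down, single-pipe sketch of the same idea (blocking reader, no Streamlit; the function name and printing are illustrative):

```python
import os
import subprocess

def watch_progress(cmd, cwd):
    # Spawn a child with unbuffered Python I/O and turn its PROGRESS:<pct>
    # lines into a 0..1 fraction. Simplified: reads only stdout, blocking.
    env = dict(os.environ, PYTHONUNBUFFERED="1")
    proc = subprocess.Popen(cmd, cwd=cwd, env=env,
                            stdout=subprocess.PIPE, text=True, bufsize=1)
    for line in proc.stdout:
        if line.startswith("PROGRESS:"):
            fraction = float(line.split(":", 1)[1]) / 100
            print(f"\r{fraction:.1%}", end="")
    return proc.wait()
```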
convertNPZtoBVH/conver_bvh.py  (CHANGED)

@@ -3,10 +3,11 @@ import numpy as np
 from scipy.spatial.transform import Rotation
 from collections import deque
 from tqdm import tqdm
+import sys
+import argparse
 
 print(f"Saving 3D Motion")
 
-
 def parse_obj(filename):
     vertices = []
     lines = []

@@ -23,7 +24,6 @@ def parse_obj(filename):
     except Exception as e:
         raise ValueError(f"Error parsing OBJ file {filename}: {str(e)}")
 
-
 def build_hierarchy(lines, root=0):
     num_joints = max(max(line) for line in lines) + 1
     adj = [[] for _ in range(num_joints)]

@@ -49,7 +49,6 @@ def build_hierarchy(lines, root=0):
             children[parent[c]].append(c)
     return parent, children
 
-
 def compute_offsets(vertices_ref, parent):
     num_joints = len(vertices_ref)
     offsets = np.zeros((num_joints, 3))

@@ -58,7 +57,6 @@ def compute_offsets(vertices_ref, parent):
             offsets[j] = vertices_ref[j] - vertices_ref[parent[j]]
     return offsets
 
-
 def compute_R_world(joint, vertices_ref, vertices_cur, children):
     if not children[joint]:
         return np.eye(3)

@@ -94,12 +92,9 @@ def compute_R_world(joint, vertices_ref, vertices_cur, children):
     R = U @ Vh
     return R
 
-
-def main():
-    output_dir = os.path.abspath('../outputs/')
-    os.makedirs(output_dir, exist_ok=True)
+def main(output_dir):
     folder = os.path.join(output_dir, 'obj_sequence')
-
+
     try:
         obj_files = sorted([f for f in os.listdir(folder) if f.endswith('.obj')])
     except Exception as e:

@@ -118,36 +113,35 @@ def main():
         root = 0
 
         hierarchy_order = []
-
         def dfs(joint):
             hierarchy_order.append(joint)
             for child in children[joint]:
                 dfs(child)
-
         dfs(root)
 
         motion_data = []
-        …
+        total_files = len(obj_files)
+        for i in range(total_files):
+            obj_file = obj_files[i]
             vertices_cur = parse_obj(os.path.join(folder, obj_file))[0]
             R_world = [compute_R_world(j, vertices_ref, vertices_cur, children) for j in range(num_joints)]
             R_local = [R_world[j] if parent[j] == -1 else R_world[parent[j]].T @ R_world[j] for j in range(num_joints)]
             euler_angles = [Rotation.from_matrix(R).as_euler('ZYX', degrees=True) for R in R_local]
             root_pos = vertices_cur[root]
             motion_line = list(root_pos) + list(euler_angles[root])
-            for j in hierarchy_order[1:]:
+            for j_idx, j in enumerate(hierarchy_order[1:], 1):
                 motion_line.extend(euler_angles[j])
+                progress = ((i / total_files) + (j_idx / len(hierarchy_order) / total_files)) * 100
+                print(f"PROGRESS:{progress:.2f}")
+                sys.stdout.flush()
             motion_data.append(motion_line)
 
-        # Note: Smoothing function has been removed
-        # Note: Elbow constraints have been removed
-
         bvh_dir = os.path.join(output_dir, 'bvh')
         os.makedirs(bvh_dir, exist_ok=True)
         bvh_file = os.path.join(bvh_dir, 'output.bvh')
 
         with open(bvh_file, 'w') as f:
             f.write("HIERARCHY\n")
-
             def write_hierarchy(joint, parent, f, indent=0):
                 if parent == -1:
                     f.write("ROOT Joint{}\n".format(joint))

@@ -156,8 +150,7 @@ def main():
                 f.write(" " * indent + "{\n")
                 f.write(" " * (indent + 1) + "OFFSET {:.6f} {:.6f} {:.6f}\n".format(*offsets[joint]))
                 if parent == -1:
-                    f.write(" " * (
-                        indent + 1) + "CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\n")
+                    f.write(" " * (indent + 1) + "CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\n")
                 else:
                     f.write(" " * (indent + 1) + "CHANNELS 3 Zrotation Yrotation Xrotation\n")
                 for child in children[joint]:

@@ -180,6 +173,8 @@ def main():
     except Exception as e:
         print(f"Error during processing: {e}")
 
-
 if __name__ == "__main__":
-    …
+    parser = argparse.ArgumentParser('Convert OBJ sequence to BVH.')
+    parser.add_argument('--output-dir', type=str, default='../outputs/', help='Output directory containing obj_sequence')
+    args = parser.parse_args()
+    main(args.output_dir)
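For reference, the writer above emits a standard BVH file: a HIERARCHY section in which the root joint carries 6 channels (translation plus ZYX rotation) and every other joint 3 rotation channels, followed by a MOTION section with one line of channel values per frame, matching the per-frame `motion_line` built in main. A schematic excerpt; joint names follow the script's `Joint{n}` pattern, while the offsets and MOTION header values shown here are illustrative (the MOTION writer falls outside the hunks above):

```python
bvh_excerpt = """\
HIERARCHY
ROOT Joint0
{
 OFFSET 0.000000 0.000000 0.000000
 CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation
 JOINT Joint1
 {
  OFFSET 0.132561 -0.004829 0.021210
  CHANNELS 3 Zrotation Yrotation Xrotation
  ...
 }
}
MOTION
Frames: 120
Frame Time: 0.033333
"""
# Each MOTION line holds root X Y Z position, root Z Y X rotation, then
# Z Y X rotation for every remaining joint in hierarchy (DFS) order.
```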
convertNPZtoBVH/conver_obj.py  (CHANGED)

@@ -1,51 +1,19 @@
 import numpy as np
 import os
 from datetime import datetime
+import argparse
 
 def define_human_connections():
-    """
-    Define connections for human stick figure with support for various poses
-    including crossed legs and complex movements
-    """
     return [
-        # Core body
-        […
-        [7, …
-        […
-        […
-
-        # Arms (with complete chains)
-        # Left arm
-        [7, 14],   # Spine to left shoulder
-        [14, 15],  # Left upper arm
-        [15, 16],  # Left forearm/hand
-
-        # Right arm
-        [7, 11],   # Spine to right shoulder
-        [11, 12],  # Right upper arm
-        [12, 13],  # Right forearm/hand
-
-        # Legs with crossed support
-        # Left leg (now crossing to right)
-        [0, 1],    # Hip to left thigh
-        [1, 2],    # Left thigh to knee
-        [2, 3],    # Left knee to foot
-
-        # Right leg
-        [0, 4],    # Hip to right thigh
-        [4, 5],    # Right thigh to knee
-        [5, 6],    # Right knee to foot
-
-        # Structural connections
-        [14, 11],  # Shoulder cross support
-        [1, 4],    # Hip cross support
+        [0, 7], [7, 8], [8, 9], [9, 10],  # Core body
+        [7, 14], [14, 15], [15, 16],      # Left arm
+        [7, 11], [11, 12], [12, 13],      # Right arm
+        [0, 1], [1, 2], [2, 3],           # Left leg
+        [0, 4], [4, 5], [5, 6],           # Right leg
+        [14, 11], [1, 4],                 # Structural connections
     ]
 
 def npz_to_obj_sequence(npz_path, output_dir):
-    """
-    Convert NPZ motion capture data to OBJ sequence
-    with enhanced support for various poses and movements
-    """
     os.makedirs(output_dir, exist_ok=True)
     data = np.load(npz_path)
     reconstruction = data['reconstruction'][0]

@@ -53,62 +21,35 @@ def npz_to_obj_sequence(npz_path, output_dir):
     num_frames = reconstruction.shape[0]
     connections = define_human_connections()
 
-
-    scale = 150.0  # Adjusted scale factor
+    scale = 150.0
 
     for frame_idx in range(num_frames):
         vertices = reconstruction[frame_idx]
         output_path = os.path.join(output_dir, f"frame_{frame_idx:04d}.obj")
 
         with open(output_path, 'w') as f:
-            # Write vertices with enhanced precision
             for v in vertices:
-                # Coordinate system transformation with improved scaling
                 x, y, z = v[0] * scale, v[2] * scale, v[1] * scale
                 f.write(f"v {x:.8f} {y:.8f} {z:.8f}\n")
-
-            # Write connections
             for conn in connections:
                 f.write(f"l {conn[0] + 1} {conn[1] + 1}\n")
-
 
 def analyze_vertex_data(npz_path):
-    """
-    Enhanced analysis function to help understand the motion data
-    and verify correct vertex positions
-    """
     data = np.load(npz_path)
     reconstruction = data['reconstruction'][0]
-
-
-    # Calculate full range of motion
     x_min, x_max = reconstruction[:,:,0].min(), reconstruction[:,:,0].max()
     y_min, y_max = reconstruction[:,:,1].min(), reconstruction[:,:,1].max()
     z_min, z_max = reconstruction[:,:,2].min(), reconstruction[:,:,2].max()
-
 
-def process_motion_capture(npz_file):
+def process_motion_capture(npz_file, output_dir):
     try:
-        # Verify input file exists
         if not os.path.exists(npz_file):
             raise FileNotFoundError(f"Input file {npz_file} not found")
 
-        # …
-
-        # print(output_dir)
-        os.makedirs(base_output_dir, exist_ok=True)
-        # base_output_dir = r"C:\Users\ROGST\Programming\Python\videotobvh\convertNPZtoBVH\outputs"
-
-        # Create a unique output directory with timestamp to avoid overwriting
-        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        output_dir = os.path.join(base_output_dir, f"obj_sequence")
-
-        # Analyze data first
+        # Use the provided output_dir with 'obj_sequence' subfolder
+        obj_output_dir = os.path.join(output_dir, "obj_sequence")
         analyze_vertex_data(npz_file)
-
-        # Convert to OBJ sequence
-        npz_to_obj_sequence(npz_path=npz_file, output_dir=output_dir)
-
+        npz_to_obj_sequence(npz_path=npz_file, output_dir=obj_output_dir)
 
     except Exception as e:
         print(f"Error processing motion capture data: {str(e)}")

@@ -118,24 +59,25 @@ def get_npz_paths(folder_path):
     if not os.path.isdir(folder_path):
         raise FileNotFoundError(f"Directory not found: {folder_path}")
 
-    # Find the first .npz file in the directory
     for file in os.listdir(folder_path):
         if file.endswith('.npz'):
-            …
-            return npz_path
+            return os.path.join(folder_path, file)
 
-    # If no .npz file is found
     raise FileNotFoundError(f"No NPZ files found in directory: {folder_path}")
 
+def arg_parse():
+    parser = argparse.ArgumentParser('Convert NPZ to OBJ sequence.')
+    parser.add_argument('--output-dir', type=str, default='../outputs/', help='Output directory for results')
+    args = parser.parse_args()
+    return args
+
 if __name__ == "__main__":
-    …
-    os.makedirs(…
-    input_dir = output_dir
+    args = arg_parse()
+    input_dir = os.path.join(os.path.abspath(args.output_dir), 'npz')
+    os.makedirs(input_dir, exist_ok=True)
 
     try:
-        # Get the first available NPZ file from the directory
         npz_file = get_npz_paths(input_dir)
-        process_motion_capture(npz_file)
+        process_motion_capture(npz_file, args.output_dir)
     except FileNotFoundError as e:
         print(f"Error: {str(e)}")
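The OBJ frames written by npz_to_obj_sequence use only two Wavefront record types: `v x y z` vertex positions (one per joint, scaled by 150 with the Y and Z axes swapped) and `l a b` polyline elements for the bone segments. OBJ indices are 1-based, which is why the writer emits `conn[0] + 1`. An illustrative first few lines of a frame_0000.obj (the coordinate values here are made up):

```python
obj_excerpt = """\
v 12.34567890 98.76543210 -5.43210987
v 10.11121314 55.16171819 -2.02122232
l 1 8
l 8 9
"""
# 'l 1 8' is connection [0, 7] shifted to OBJ's 1-based indexing;
# conver_bvh.py's parse_obj reads these 'l' records back to rebuild
# the joint hierarchy for the BVH export.
```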