# OpenScene / openscene-v1.1 / process_data / create_nuplan_data_with_vis.py
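"""Generate per-log nuPlan info .pkl files (OpenScene / NAVSIM data preparation).

For every nuPlan log database, this script collects per-frame metadata
(LiDAR path, camera calibrations and image paths, ego poses, CAN bus data,
traffic-light status, driving commands, LiDAR sweeps, and 3D box annotations)
and writes one pickle per log under --out-dir, keyed by scene token.

Requires the NUPLAN_MAPS_ROOT environment variable to be set.

Example invocation (paths are placeholders; adjust to your setup):

    python create_nuplan_data_with_vis.py \
        --nuplan-root-path /path/to/nuplan \
        --nuplan-db-path /path/to/nuplan/nuplan-v1.1/splits/mini \
        --nuplan-sensor-path /path/to/nuplan/nuplan-v1.1/sensor_blobs \
        --nuplan-map-root /path/to/nuplan/maps \
        --nuplan-map-version nuplan-maps-v1.0 \
        --out-dir ./exp/nuplan_infos \
        --thread-num 8
"""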
import argparse
import shutil
from typing import Dict, List
# import mmcv
import numpy as np
from os import listdir
from os.path import isfile, join
from pyquaternion import Quaternion
import cv2
from tqdm import tqdm
import os
import multiprocessing
import pickle
from nuplan.common.actor_state.state_representation import StateSE2
from nuplan.common.maps.abstract_map import AbstractMap
from nuplan.common.maps.nuplan_map.map_factory import get_maps_api
from nuplan.database.nuplan_db_orm.nuplandb_wrapper import NuPlanDBWrapper
from nuplan.database.nuplan_db_orm.lidar import Lidar
from nuplan.database.nuplan_db.nuplan_scenario_queries import (
get_traffic_light_status_for_lidarpc_token_from_db,
get_images_from_lidar_tokens,
get_cameras,
)
from nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario import CameraChannel
from navsim.common.extraction.driving_command import get_driving_command
from helpers.multiprocess_helper import get_scenes_per_thread
from helpers.canbus import CanBus
from helpers.multisweep_helper import obtain_sensor2top
NUPLAN_MAPS_ROOT = os.environ["NUPLAN_MAPS_ROOT"]
filtered_classes = ["traffic_cone", "barrier", "czone_sign", "generic_object"]
def create_nuplan_info(
nuplan_db_wrapper: NuPlanDBWrapper, db_names: List[str], args
):
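    """Extract per-frame info dicts for the given nuPlan logs and dump one pkl per log.

    Args:
        nuplan_db_wrapper: wrapper giving access to the per-log nuPlan databases.
        db_names: log database names (without the ``.db`` extension) assigned to this worker.
        args: parsed command-line arguments (paths, sweep/sample intervals, filters).
    """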
nuplan_sensor_root = args.nuplan_sensor_path
    # List the logs that have sensor data on disk; the db files for this thread are passed in via db_names.
scene_dict = {}
log_sensors = os.listdir(nuplan_sensor_root)
# For each sequence...
for log_db_name in db_names:
log_db = nuplan_db_wrapper.get_log_db(log_db_name)
log_name = log_db.log_name
log_token = log_db.log.token
map_location = log_db.log.location
vehicle_name = log_db.log.vehicle_name
# NOTE: I am unsure why "us-nv-las-vegas-strip" is saved as "las_vegas" in db logs.
map_name = map_location if map_location != "las_vegas" else "us-nv-las-vegas-strip"
map_api = get_maps_api(NUPLAN_MAPS_ROOT, "nuplan-maps-v1.0", map_name) # NOTE: lru cached
        log_file = os.path.join(args.nuplan_db_path, log_db_name + ".db")
if log_db_name not in log_sensors:
continue
frame_idx = 0
# list (sequence) of point clouds (each frame).
lidar_pc_list = log_db.lidar_pc
lidar_pcs = lidar_pc_list
# get log cam infos
log_cam_infos = {}
for cam in get_cameras(log_file, [str(channel.value) for channel in CameraChannel]):
intrinsics = np.array(pickle.loads(cam.intrinsic), dtype=np.float32)
translation = np.array(pickle.loads(cam.translation), dtype=np.float32)
rotation = np.array(pickle.loads(cam.rotation), dtype=np.float32)
rotation = Quaternion(rotation).rotation_matrix
distortion = np.array(pickle.loads(cam.distortion), dtype=np.float32)
c = dict(
intrinsic=intrinsics,
distortion=distortion,
translation=translation,
rotation=rotation,
)
log_cam_infos[cam.token] = c
        # Find the first valid point cloud, i.e. the first frame with all 8 cameras available.
for start_idx in range(0, len(lidar_pcs)):
retrieved_images = get_images_from_lidar_tokens(
log_file,
[lidar_pcs[start_idx].token],
[str(channel.value) for channel in CameraChannel],
)
if len(list(retrieved_images)) == 8:
break
        # Refine start_idx: pick whichever candidate LiDAR frame has the smaller timestamp difference to its CAM_F0 image.
retrieved_images_0 = get_images_from_lidar_tokens(
log_file, [lidar_pcs[start_idx].token], ["CAM_F0"]
)
diff_0 = abs(list(retrieved_images_0)[0].timestamp - lidar_pcs[start_idx].timestamp)
retrieved_images_1 = get_images_from_lidar_tokens(
log_file, [lidar_pcs[start_idx + 1].token], ["CAM_F0"]
)
diff_1 = abs(list(retrieved_images_1)[0].timestamp - lidar_pcs[start_idx + 1].timestamp)
start_idx = start_idx if diff_0 < diff_1 else start_idx + 1
        # Select key frames (controlled by args.sample_interval).
lidar_pc_list = lidar_pc_list[start_idx :: args.sample_interval]
index = -1
for lidar_pc in tqdm(lidar_pc_list, dynamic_ncols=True):
index += 1
# LiDAR attributes.
lidar_pc_token = lidar_pc.token
scene_token = lidar_pc.scene_token
pc_file_name = lidar_pc.filename
next_token = lidar_pc.next_token
prev_token = lidar_pc.prev_token
lidar_token = lidar_pc.lidar_token
time_stamp = lidar_pc.timestamp
scene_name = "log-" + lidar_pc.scene.name
lidar_boxes = lidar_pc.lidar_boxes
roadblock_ids = [
str(roadblock_id)
for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ")
if len(roadblock_id) > 0
]
            # Start a new frame buffer whenever a new scene begins.
            if scene_token not in scene_dict:
scene_dict[scene_token] = []
frame_idx = 0
if frame_idx == 0:
scene_dict[scene_token] = []
can_bus = CanBus(lidar_pc).tensor
lidar = log_db.session.query(Lidar).filter(Lidar.token == lidar_token).all()
pc_file_path = os.path.join(args.nuplan_sensor_path, pc_file_name)
if not os.path.exists(pc_file_path): # some lidar files are missing.
# print(pc_file_path)
with open("./nofile.log", "a") as f:
f.write(pc_file_path)
f.write("\n")
continue
traffic_lights = []
for traffic_light_status in get_traffic_light_status_for_lidarpc_token_from_db(
log_file, lidar_pc_token
):
lane_connector_id: int = traffic_light_status.lane_connector_id
is_red: bool = traffic_light_status.status.value == 2
traffic_lights.append((lane_connector_id, is_red))
ego_pose = StateSE2(
lidar_pc.ego_pose.x,
lidar_pc.ego_pose.y,
lidar_pc.ego_pose.quaternion.yaw_pitch_roll[0],
)
driving_command = get_driving_command(ego_pose, map_api, roadblock_ids)
info = {
"token": lidar_pc_token,
"frame_idx": frame_idx,
"timestamp": time_stamp,
"log_name": log_name,
"log_token": log_token,
"scene_name": scene_name,
"scene_token": scene_token,
"map_location": map_location,
"roadblock_ids": roadblock_ids,
"vehicle_name": vehicle_name,
"can_bus": can_bus,
"lidar_path": pc_file_name, # use the relative path.
"lidar2ego_translation": lidar[0].translation_np,
"lidar2ego_rotation": [
lidar[0].rotation.w,
lidar[0].rotation.x,
lidar[0].rotation.y,
lidar[0].rotation.z,
],
"ego2global_translation": can_bus[:3],
"ego2global_rotation": can_bus[3:7],
"ego_dynamic_state": [
lidar_pc.ego_pose.vx,
lidar_pc.ego_pose.vy,
lidar_pc.ego_pose.acceleration_x,
lidar_pc.ego_pose.acceleration_y,
],
"traffic_lights": traffic_lights,
"driving_command": driving_command,
"cams": dict(),
"prev_sweep_token": prev_token,
"next_sweep_token": next_token,
"sweeps": [],
}
info["sample_prev"] = None
info["sample_next"] = None
if index > 0: # find prev.
info["sample_prev"] = lidar_pc_list[index - 1].token
if index < len(lidar_pc_list) - 1: # find next.
next_key_token = lidar_pc_list[index + 1].token
next_key_scene = lidar_pc_list[index + 1].scene_token
info["sample_next"] = next_key_token
else:
next_key_token, next_key_scene = None, None
            if next_key_token is None or next_key_token == "":
                frame_idx = 0
            elif next_key_scene != scene_token:
                frame_idx = 0
            else:
                frame_idx += 1
# Parse lidar2ego translation.
l2e_r = info["lidar2ego_rotation"]
l2e_t = info["lidar2ego_translation"]
e2g_r = info["ego2global_rotation"]
e2g_t = info["ego2global_translation"]
l2e_r_mat = Quaternion(l2e_r).rotation_matrix
e2g_r_mat = Quaternion(e2g_r).rotation_matrix
            # lidar2global maps point coordinates from the lidar frame to the global frame: lidar2global = ego2global @ lidar2ego.
l2e = np.eye(4)
l2e[:3, :3] = l2e_r_mat
l2e[:3, -1] = l2e_t
e2g = np.eye(4)
e2g[:3, :3] = e2g_r_mat
e2g[:3, -1] = e2g_t
lidar2global = np.dot(e2g, l2e)
info["ego2global"] = e2g
info["lidar2ego"] = l2e
info["lidar2global"] = lidar2global
            # Obtain the information of the 8 camera images for this frame.
retrieved_images = get_images_from_lidar_tokens(
log_file, [lidar_pc.token], [str(channel.value) for channel in CameraChannel]
)
cams = {}
for img in retrieved_images:
channel = img.channel
filename = img.filename_jpg
filepath = os.path.join(args.nuplan_sensor_path, filename)
if not os.path.exists(filepath):
frame_str = f"{log_db_name}, {lidar_pc_token}"
                    tqdm.write(f"camera file missing: {frame_str}")
continue
cam_info = log_cam_infos[img.camera_token]
cams[channel] = dict(
data_path=filename, # use the relative path.
sensor2lidar_rotation=cam_info["rotation"],
sensor2lidar_translation=cam_info["translation"],
cam_intrinsic=cam_info["intrinsic"],
distortion=cam_info["distortion"],
)
if len(cams) != 8:
frame_str = f"{log_db_name}, {lidar_pc_token}"
tqdm.write(f"not all cameras are available: {frame_str}")
continue
info["cams"] = cams
# parse sweeps if assigned.
sweeps = []
tmp_info = info
count = 0
while len(sweeps) < args.max_sweeps:
if tmp_info["prev_sweep_token"] == None:
break
# Get the previous sweep and update info to previous sweep.
sweep = obtain_sensor2top(
tmp_info["prev_sweep_token"], log_db, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, args
)
                # Save a sweep every args.sweep_interval previous frames.
tmp_info = sweep
if count == args.sweep_interval:
if os.path.exists(sweep["data_path"]):
sweeps.append(sweep)
count = 0
else:
count += 1
info["sweeps"] = sweeps
# Parse 3D object labels.
if not args.is_test:
if args.filter_instance:
fg_lidar_boxes = [
box for box in lidar_boxes if box.category.name not in filtered_classes
]
else:
fg_lidar_boxes = lidar_boxes
instance_tokens = [item.token for item in fg_lidar_boxes]
track_tokens = [item.track_token for item in fg_lidar_boxes]
inv_ego_r = lidar_pc.ego_pose.trans_matrix_inv
ego_yaw = lidar_pc.ego_pose.quaternion.yaw_pitch_roll[0]
locs = np.array(
[
np.dot(
inv_ego_r[:3, :3],
(b.translation_np - lidar_pc.ego_pose.translation_np).T,
).T
for b in fg_lidar_boxes
]
).reshape(-1, 3)
dims = np.array([[b.length, b.width, b.height] for b in fg_lidar_boxes]).reshape(
-1, 3
)
rots = np.array([b.yaw for b in fg_lidar_boxes]).reshape(-1, 1)
rots = rots - ego_yaw
velocity = np.array([[b.vx, b.vy] for b in fg_lidar_boxes]).reshape(-1, 2)
velocity_3d = np.array([[b.vx, b.vy, b.vz] for b in fg_lidar_boxes]).reshape(-1, 3)
                # Convert velocities from the global frame to the lidar frame: only the rotation matrices are needed.
for i in range(len(fg_lidar_boxes)):
velo = np.array([*velocity[i], 0.0])
velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
velocity[i] = velo[:2]
for i in range(len(fg_lidar_boxes)):
velo = velocity_3d[i]
velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
velocity_3d[i] = velo
names = [box.category.name for box in fg_lidar_boxes]
names = np.array(names)
gt_boxes_nuplan = np.concatenate([locs, dims, rots], axis=1)
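                # Note: box centers and yaws above are expressed relative to the current
                # ego pose, while gt_velocity_3d below is rotated into the lidar frame.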
info["anns"] = dict(
gt_boxes=gt_boxes_nuplan,
gt_names=names,
gt_velocity_3d=velocity_3d.reshape(-1, 3),
instance_tokens=instance_tokens,
track_tokens=track_tokens,
)
scene_dict[scene_token].append(info)
del map_api
pkl_file_path = f"{args.out_dir}/{log_name}.pkl"
os.makedirs(args.out_dir, exist_ok=True)
with open(pkl_file_path, "wb") as f:
pickle.dump(dict(scene_dict), f, protocol=pickle.HIGHEST_PROTOCOL)
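# Each output file "{out_dir}/{log_name}.pkl" maps scene_token -> list of per-frame
# info dicts. A minimal loading sketch (hypothetical path):
#
#     with open("exp/nuplan_infos/<log_name>.pkl", "rb") as f:
#         scene_dict = pickle.load(f)
#     for scene_token, frames in scene_dict.items():
#         print(scene_token, len(frames), frames[0]["lidar_path"])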
def parse_args():
    parser = argparse.ArgumentParser(description="Create per-log nuPlan info .pkl files.")
parser.add_argument(
"--thread-num", type=int, default=50, help="number of threads for multi-processing."
)
# directory configurations.
parser.add_argument("--nuplan-root-path", help="the path to nuplan root path.")
parser.add_argument("--nuplan-db-path", help="the dir saving nuplan db.")
parser.add_argument("--nuplan-sensor-path", help="the dir to nuplan sensor data.")
parser.add_argument("--nuplan-map-version", help="nuplan mapping dataset version.")
parser.add_argument("--nuplan-map-root", help="path to nuplan map data.")
parser.add_argument("--out-dir", help="output path.")
# data configurations.
parser.add_argument("--max-sweeps", type=int, default=10, help="number of point cloud sweeps.")
parser.add_argument(
"--sweep-interval", type=int, default=5, help="interval of point cloud sweeps."
)
parser.add_argument(
"--sample-interval", type=int, default=10, help="interval of key frame samples."
)
parser.add_argument(
"--scene-process-type",
type=str,
default="skip",
help="process type when a scene is processed.",
)
# TODO.
parser.add_argument("--save-bev-images", action="store_true", help="XXX")
parser.add_argument("--save_surround_images", action="store_true", help="XXX")
# split.
parser.add_argument("--is-test", action="store_true", help="Dealing with Test set data.")
parser.add_argument(
"--filter-instance", action="store_true", help="Ignore instances in filtered_classes."
)
parser.add_argument("--split", type=str, default="train", help="Train/Val/Test set.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
nuplan_root_path = args.nuplan_root_path
nuplan_db_path = args.nuplan_db_path
nuplan_sensor_path = args.nuplan_sensor_path
nuplan_map_version = args.nuplan_map_version
nuplan_map_root = args.nuplan_map_root
out_dir = args.out_dir
nuplan_db_wrapper = NuPlanDBWrapper(
data_root=nuplan_root_path,
map_root=nuplan_map_root,
db_files=nuplan_db_path,
map_version=nuplan_map_version,
)
nuplan_db_path = args.nuplan_db_path
    db_names_with_extension = [
        f for f in listdir(nuplan_db_path) if isfile(join(nuplan_db_path, f)) and f.endswith(".db")
    ]
db_names = [name[:-3] for name in db_names_with_extension]
db_names.sort()
print(db_names)
    db_names_split = np.array_split(np.array(db_names), args.thread_num)  # handles counts not divisible by thread_num
manager = multiprocessing.Manager()
# return_dict = manager.dict()
threads = []
for x in range(args.thread_num):
t = multiprocessing.Process(
target=create_nuplan_info,
name=str(x),
args=(nuplan_db_wrapper, db_names_split[x], args),
)
threads.append(t)
for thr in threads:
thr.start()
for thr in threads:
if thr.is_alive():
thr.join()
# pkl_file_path = f"{args.out_dir}/{args.split}.pkl"
# os.makedirs(args.out_dir, exist_ok=True)
# with open(pkl_file_path, "wb") as f:
# pickle.dump(dict(return_dict), f, protocol=pickle.HIGHEST_PROTOCOL)