Datasets:

License:
sephyli's picture
Upload folder using huggingface_hub
c1eaa40
raw
history blame
4.5 kB
import numpy as np
from pyquaternion import Quaternion
from nuplan.database.utils.pointclouds.pointcloud import PointCloud
def _load_points(pc_file_name):
    """Load a nuPlan point-cloud file and return its points, one per row.

    Args:
        pc_file_name: path to the point-cloud file on disk.

    Returns:
        The transposed ``to_pcd_bin2`` array (shape (N, C), one point per row).
    """
    cloud = PointCloud.parse_from_file(pc_file_name)
    return cloud.to_pcd_bin2().T
def transform_pcs_to_images(
    pc,
    cam2lidar_rotation,
    cam2lidar_translation,
    cam_intrinsic,
    img_shape=None,
    eps=1e-3,
    return_depth=False,
):
    """Project LiDAR-frame points onto a camera's image plane.

    Args:
        pc: (N, C) array whose first three columns are x, y, z in the
            LiDAR frame; any extra columns are ignored.
        cam2lidar_rotation: (3, 3) rotation matrix mapping camera-frame
            points into the LiDAR frame.
        cam2lidar_translation: (3,) camera origin expressed in the LiDAR frame.
        cam_intrinsic: camera intrinsic matrix (typically 3x3), padded
            into a 4x4 projection internally.
        img_shape: optional (height, width); when given, points that fall
            outside the image bounds are masked out of the FOV.
        eps: minimum depth — points with camera-frame depth <= eps are
            flagged as out of view, and the perspective divide clamps the
            denominator at eps to avoid division by (near-)zero.
        return_depth: when True, append camera-frame depth as a third column.

    Returns:
        Tuple of:
            - (N, 2) pixel coordinates ((N, 3) with depth if return_depth),
            - (N,) boolean mask of points in front of the camera (and inside
              the image when img_shape is given).
    """
    pc_xyz = pc[:, :3]
    # Invert the cam->lidar extrinsics to get lidar->cam:
    # x_cam = R^-1 x_lidar - R^-1 t.
    lidar2cam_r = np.linalg.inv(cam2lidar_rotation)
    lidar2cam_t = cam2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t
    # Pad the intrinsics to 4x4 so lidar->image is a single matrix product.
    viewpad = np.eye(4)
    viewpad[: cam_intrinsic.shape[0], : cam_intrinsic.shape[1]] = cam_intrinsic
    lidar2img_rt = viewpad @ lidar2cam_rt.T
    # Homogeneous coordinates, then apply the combined transform.
    cur_pc_xyz = np.concatenate([pc_xyz, np.ones_like(pc_xyz)[:, :1]], -1)
    cur_pc_cam = lidar2img_rt @ cur_pc_xyz.T
    cur_pc_cam = cur_pc_cam.T
    # Only points strictly in front of the camera are visible.
    cur_pc_in_fov = cur_pc_cam[:, 2] > eps
    depth = cur_pc_cam[..., 2:3]
    # Perspective divide with the denominator clamped at eps.
    cur_pc_cam = cur_pc_cam[..., 0:2] / np.maximum(
        cur_pc_cam[..., 2:3], np.ones_like(cur_pc_cam[..., 2:3]) * eps
    )
    if img_shape is not None:
        img_h, img_w = img_shape
        cur_pc_in_fov = (
            cur_pc_in_fov
            & (cur_pc_cam[:, 0] < (img_w - 1))
            & (cur_pc_cam[:, 0] > 0)
            & (cur_pc_cam[:, 1] < (img_h - 1))
            & (cur_pc_cam[:, 1] > 0)
        )
    if return_depth:
        cur_pc_cam = np.concatenate([cur_pc_cam, depth], axis=-1)
    return cur_pc_cam, cur_pc_in_fov
def transform_cam_to_img(pc_cam, cam_intrinsic, img_shape=None, eps=1e-3, return_depth=False):
"""Transform point clouds from LiDAR coordinates to the camera coordinates.
Args:
pc: a numpy array with shape [-1, 6]
cam_infos: dict of camera information.
Return:
pc_cam: dict of 2d coordinates in corresponding camera space.
"""
pc_cam = pc_cam[:, :3]
viewpad = np.eye(4)
viewpad[: cam_intrinsic.shape[0], : cam_intrinsic.shape[1]] = cam_intrinsic
pc_img = np.concatenate([pc_cam, np.ones_like(pc_cam)[:, :1]], -1)
pc_img = viewpad @ pc_img.T
pc_img = pc_img.T
cur_pc_in_fov = pc_img[:, 2] > eps
depth = pc_img[..., 2:3]
pc_img = pc_img[..., 0:2] / np.maximum(pc_img[..., 2:3], np.ones_like(pc_img[..., 2:3]) * eps)
if img_shape is not None:
img_h, img_w = img_shape
cur_pc_in_fov = (
cur_pc_in_fov
& (pc_img[:, 0] < (img_w - 1))
& (pc_img[:, 0] > 0)
& (pc_img[:, 1] < (img_h - 1))
& (pc_img[:, 1] > 0)
)
if return_depth:
pc_img = np.concatenate([pc_img, depth], axis=-1)
return pc_img, cur_pc_in_fov
def transform_nuplan_boxes_to_cam(box, cam2lidar_rotation, cam2lidar_translation):
    """Transform 3D boxes from the LiDAR frame into a camera frame.

    Args:
        box: a numpy array with shape [-1, 7]: center (x, y, z), size
            (l, w, h) and a yaw angle about the LiDAR z-axis.
        cam2lidar_rotation: (3, 3) rotation matrix mapping camera-frame
            points into the LiDAR frame.
        cam2lidar_translation: (3,) camera origin expressed in the LiDAR frame.

    Returns:
        [-1, 7] array of camera-frame boxes: transformed center, sizes
        reordered to (l, h, w), and the camera-frame yaw.
    """
    locs, dims, rots = box[:, :3], box[:, 3:6], box[:, 6:]
    dims_cams = dims[:, [0, 2, 1]]  # l, w, h -> l, h, w
    rots_cam = np.zeros_like(rots)
    for idx, rot in enumerate(rots):
        # Compose each box yaw (rotation about lidar z) with the inverse
        # camera rotation, then keep only the yaw component, negated.
        # NOTE(review): the sign flip presumably matches the camera-frame
        # yaw convention of the downstream consumer — confirm against caller.
        rot = Quaternion(axis=[0, 0, 1], radians=rot)
        rot = Quaternion(matrix=cam2lidar_rotation).inverse * rot
        rots_cam[idx] = -rot.yaw_pitch_roll[0]
    # Build the lidar->cam rigid transform: x_cam = R^-1 x_lidar - R^-1 t.
    lidar2cam_r = np.linalg.inv(cam2lidar_rotation)
    lidar2cam_t = cam2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t
    # Homogeneous coordinates, transform, then drop the trailing 1.
    locs_cam = np.concatenate([locs, np.ones_like(locs)[:, :1]], -1)  # -1, 4
    locs_cam = lidar2cam_rt.T @ locs_cam.T
    locs_cam = locs_cam.T
    locs_cam = locs_cam[:, :-1]
    return np.concatenate([locs_cam, dims_cams, rots_cam], -1)
def transform_sweep_pc_to_lidar_top(sweep_pc, sensor2lidar_rotation, sensor2lidar_translation):
    """Map sweep points from their sensor frame into the top-LiDAR frame.

    Args:
        sweep_pc: (N, C) array whose first three columns are x, y, z in the
            sweep sensor's frame; extra columns are dropped.
        sensor2lidar_rotation: (3, 3) rotation mapping sensor-frame points
            into the LiDAR frame.
        sensor2lidar_translation: (3,) sensor origin in the LiDAR frame.

    Returns:
        (N, 3) array of points in the top-LiDAR frame (x_lidar = R x + t).
    """
    xyz = sweep_pc[:, :3]
    # Homogeneous row vectors: [x, y, z, 1].
    homo = np.concatenate([xyz, np.ones_like(xyz)[:, :1]], -1)
    # Row-vector convention: rotation transposed in the upper-left block,
    # translation in the bottom row, so homo @ rt == xyz @ R.T + t.
    rt = np.eye(4)
    rt[:3, :3] = sensor2lidar_rotation.T
    rt[3, :3] = sensor2lidar_translation
    return (homo @ rt)[:, :3]