Datasets:

License:
File size: 4,503 Bytes
c1eaa40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import numpy as np
from pyquaternion import Quaternion

from nuplan.database.utils.pointclouds.pointcloud import PointCloud


def _load_points(pc_file_name):
    """Parse a point-cloud file and return its points as a numpy array.

    The parsed cloud is converted to the pcd-bin2 layout and transposed so
    that rows are points.
    """
    parsed = PointCloud.parse_from_file(pc_file_name)
    return parsed.to_pcd_bin2().T


def transform_pcs_to_images(
    pc,
    cam2lidar_rotation,
    cam2lidar_translation,
    cam_intrinsic,
    img_shape=None,
    eps=1e-3,
    return_depth=False,
):
    """Project LiDAR-frame points into a camera's image plane.

    Args:
        pc: numpy array of shape [N, C] (C >= 3); only the first three
            columns (x, y, z in LiDAR coordinates) are used.
        cam2lidar_rotation: [3, 3] rotation matrix, camera frame -> LiDAR frame.
        cam2lidar_translation: [3] translation, camera frame -> LiDAR frame.
        cam_intrinsic: [3, 3] camera intrinsic matrix.
        img_shape: optional (height, width); when given, points whose pixel
            coordinates fall outside the image are removed from the FOV mask.
        eps: minimum depth, used both as the in-front-of-camera threshold and
            as the clamp that avoids division by (near-)zero depth.
        return_depth: when True, the depth is appended as a third column of
            the returned coordinates.

    Returns:
        A tuple ``(pc_img, in_fov)``:
            pc_img: [N, 2] pixel coordinates ([N, 3] when ``return_depth``).
            in_fov: [N] boolean mask of points in front of the camera (and
                inside the image bounds when ``img_shape`` is given).
    """
    pc_xyz = pc[:, :3]

    # Invert the cam->lidar extrinsics to obtain the lidar->cam transform.
    # The 4x4 is laid out for row-vector use and transposed again below.
    lidar2cam_r = np.linalg.inv(cam2lidar_rotation)
    lidar2cam_t = cam2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t

    # Pad the 3x3 intrinsic into a 4x4 so it composes with the extrinsics.
    viewpad = np.eye(4)
    viewpad[: cam_intrinsic.shape[0], : cam_intrinsic.shape[1]] = cam_intrinsic
    lidar2img_rt = viewpad @ lidar2cam_rt.T

    # Homogeneous coordinates [x, y, z, 1]; build only the single ones
    # column instead of a full N x 3 array that gets sliced.
    cur_pc_xyz = np.concatenate([pc_xyz, np.ones_like(pc_xyz[:, :1])], -1)
    cur_pc_cam = lidar2img_rt @ cur_pc_xyz.T
    cur_pc_cam = cur_pc_cam.T

    # Points behind (or too close to) the camera plane are out of FOV.
    cur_pc_in_fov = cur_pc_cam[:, 2] > eps
    depth = cur_pc_cam[..., 2:3]

    # Perspective divide; depth is clamped at eps to avoid division by zero.
    cur_pc_cam = cur_pc_cam[..., 0:2] / np.maximum(
        cur_pc_cam[..., 2:3], np.ones_like(cur_pc_cam[..., 2:3]) * eps
    )
    if img_shape is not None:
        img_h, img_w = img_shape
        cur_pc_in_fov = (
            cur_pc_in_fov
            & (cur_pc_cam[:, 0] < (img_w - 1))
            & (cur_pc_cam[:, 0] > 0)
            & (cur_pc_cam[:, 1] < (img_h - 1))
            & (cur_pc_cam[:, 1] > 0)
        )
    if return_depth:
        cur_pc_cam = np.concatenate([cur_pc_cam, depth], axis=-1)
    return cur_pc_cam, cur_pc_in_fov


def transform_cam_to_img(pc_cam, cam_intrinsic, img_shape=None, eps=1e-3, return_depth=False):
    """Project camera-frame points onto the image plane via the intrinsics.

    Args:
        pc_cam: numpy array of shape [N, C] (C >= 3); only the first three
            columns (x, y, z in camera coordinates) are used.
        cam_intrinsic: [3, 3] camera intrinsic matrix.
        img_shape: optional (height, width); when given, points whose pixel
            coordinates fall outside the image are removed from the FOV mask.
        eps: minimum depth, used both as the in-front-of-camera threshold and
            as the clamp that avoids division by (near-)zero depth.
        return_depth: when True, the depth is appended as a third column of
            the returned coordinates.

    Returns:
        A tuple ``(pc_img, in_fov)``:
            pc_img: [N, 2] pixel coordinates ([N, 3] when ``return_depth``).
            in_fov: [N] boolean mask of points in front of the camera (and
                inside the image bounds when ``img_shape`` is given).
    """
    pc_cam = pc_cam[:, :3]

    # Pad the 3x3 intrinsic into a 4x4 homogeneous projection matrix.
    viewpad = np.eye(4)
    viewpad[: cam_intrinsic.shape[0], : cam_intrinsic.shape[1]] = cam_intrinsic

    # Homogeneous coordinates [x, y, z, 1]; build only the single ones
    # column instead of a full N x 3 array that gets sliced.
    pc_img = np.concatenate([pc_cam, np.ones_like(pc_cam[:, :1])], -1)
    pc_img = viewpad @ pc_img.T
    pc_img = pc_img.T

    # Points behind (or too close to) the camera plane are out of FOV.
    cur_pc_in_fov = pc_img[:, 2] > eps
    depth = pc_img[..., 2:3]

    # Perspective divide; depth is clamped at eps to avoid division by zero.
    pc_img = pc_img[..., 0:2] / np.maximum(pc_img[..., 2:3], np.ones_like(pc_img[..., 2:3]) * eps)
    if img_shape is not None:
        img_h, img_w = img_shape
        cur_pc_in_fov = (
            cur_pc_in_fov
            & (pc_img[:, 0] < (img_w - 1))
            & (pc_img[:, 0] > 0)
            & (pc_img[:, 1] < (img_h - 1))
            & (pc_img[:, 1] > 0)
        )
    if return_depth:
        pc_img = np.concatenate([pc_img, depth], axis=-1)
    return pc_img, cur_pc_in_fov


def transform_nuplan_boxes_to_cam(box, cam2lidar_rotation, cam2lidar_translation):
    """Transform 3D boxes from LiDAR coordinates to camera coordinates.

    Args:
        box: numpy array of shape [N, 7]: center (x, y, z), three size
            columns, and a yaw angle, all in the LiDAR frame.
        cam2lidar_rotation: [3, 3] rotation matrix, camera frame -> LiDAR frame.
        cam2lidar_translation: [3] translation, camera frame -> LiDAR frame.

    Returns:
        A [N, 7] numpy array of boxes in the camera frame: transformed
        centers, size columns with the last two swapped, and the yaw
        re-expressed about the camera axes with flipped sign.
    """
    locs, dims, rots = box[:, :3], box[:, 3:6], box[:, 6:]
    # Swap the last two size columns for the camera box convention.
    # NOTE(review): original comment said "l, w, h -> l, h, w" — confirm
    # against the dataset's actual box size layout.
    dims_cams = dims[:, [0, 2, 1]]

    # Re-express each heading in the camera frame; camera yaw sign is flipped
    # relative to the LiDAR convention. The extrinsic quaternion is loop
    # invariant, so build it once instead of once per box.
    cam2lidar_q_inv = Quaternion(matrix=cam2lidar_rotation).inverse
    rots_cam = np.zeros_like(rots)
    for idx, rot in enumerate(rots):
        yaw_q = Quaternion(axis=[0, 0, 1], radians=rot)
        rots_cam[idx] = -(cam2lidar_q_inv * yaw_q).yaw_pitch_roll[0]

    # Invert the cam->lidar extrinsics (same construction as the point
    # projection helper in this module).
    lidar2cam_r = np.linalg.inv(cam2lidar_rotation)
    lidar2cam_t = cam2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t

    # Homogeneous transform of the box centers into the camera frame.
    locs_cam = np.concatenate([locs, np.ones_like(locs[:, :1])], -1)  # -1, 4
    locs_cam = lidar2cam_rt.T @ locs_cam.T
    locs_cam = locs_cam.T
    locs_cam = locs_cam[:, :-1]
    return np.concatenate([locs_cam, dims_cams, rots_cam], -1)


def transform_sweep_pc_to_lidar_top(sweep_pc, sensor2lidar_rotation, sensor2lidar_translation):
    """Map sweep points from their sensor frame into the lidar-top frame.

    Only the first three columns of ``sweep_pc`` are used; the rotation and
    translation are applied as a single 4x4 homogeneous transform in
    row-vector convention, i.e. ``p_out = p @ R.T + t``.
    """
    points = sweep_pc[:, :3]
    homogeneous = np.concatenate([points, np.ones_like(points[:, :1])], -1)

    # Row-vector layout: upper-left block holds R.T, bottom row holds t.
    transform = np.eye(4)
    transform[:3, :3] = sensor2lidar_rotation.T
    transform[3, :3] = sensor2lidar_translation

    transformed = homogeneous @ transform
    return transformed[:, :3]