import numpy as np
import open3d as o3d

def improved_multiway_registration(pcds, voxel_size=0.05,
                                   max_correspondence_distance_coarse=None,
                                   max_correspondence_distance_fine=None,
                                   overlap=3, quadratic_overlap=True,
                                   use_colored_icp=True):
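    """Register a list of point clouds into one frame via pose graph optimization.

    Each cloud is downsampled, denoised, and pairwise-registered (colored ICP or
    point-to-plane ICP); the resulting pose graph is globally optimized and the
    inputs are merged. Coarse/fine correspondence distances default to 15x and
    1.5x the voxel size. Note: the clouds in `pcds` are transformed in place.
    Returns (combined point cloud, optimized pose graph).
    """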
    if max_correspondence_distance_coarse is None:
        max_correspondence_distance_coarse = voxel_size * 15
    if max_correspondence_distance_fine is None:
        max_correspondence_distance_fine = voxel_size * 1.5

    def preprocess_point_cloud(pcd, voxel_size):
        pcd_down = pcd.voxel_down_sample(voxel_size)
        pcd_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
        # Apply statistical outlier removal
        cl, ind = pcd_down.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
        pcd_down = pcd_down.select_by_index(ind)
        return pcd_down

    def pairwise_registration(source, target, use_colored_icp, voxel_size,
                              max_correspondence_distance_coarse,
                              max_correspondence_distance_fine):
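        # Align `source` onto `target` and return (transformation, information matrix).
        # Colored ICP refines over three scales (coarse to fine); otherwise a
        # coarse-then-fine point-to-plane ICP is used.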
        current_transformation = np.identity(4)  # Start with identity matrix

        if use_colored_icp:
            print("Apply colored point cloud registration")
            voxel_radius = [5*voxel_size, 3*voxel_size, voxel_size]
            max_iter = [60, 35, 20]
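            # Coarse-to-fine schedule: large downsampling radius with many
            # iterations first, then finer radii with fewer iterations.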
            
            for scale in range(3):
                max_it = max_iter[scale]
                radius = voxel_radius[scale]

                source_down = source.voxel_down_sample(radius)
                target_down = target.voxel_down_sample(radius)

                source_down.estimate_normals(
                    o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
                target_down.estimate_normals(
                    o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))

                try:
                    result_icp = o3d.pipelines.registration.registration_colored_icp(
                        source_down, target_down, radius, current_transformation,
                        o3d.pipelines.registration.TransformationEstimationForColoredICP(),
                        o3d.pipelines.registration.ICPConvergenceCriteria(
                            relative_fitness=1e-6, relative_rmse=1e-6,
                            max_iteration=max_it))
                    current_transformation = result_icp.transformation
                except RuntimeError as e:
                    print(f"Colored ICP failed at scale {scale}: {str(e)}")
                    print("Keeping the previous transformation")
                    # We keep the previous transformation, no need to reassign

            transformation_icp = current_transformation
        else:
            print("Apply point-to-plane ICP")
            try:
                icp_coarse = o3d.pipelines.registration.registration_icp(
                    source, target, max_correspondence_distance_coarse, current_transformation,
                    o3d.pipelines.registration.TransformationEstimationPointToPlane())
                current_transformation = icp_coarse.transformation
                
                icp_fine = o3d.pipelines.registration.registration_icp(
                    source, target, max_correspondence_distance_fine,
                    current_transformation,
                    o3d.pipelines.registration.TransformationEstimationPointToPlane())
                transformation_icp = icp_fine.transformation
            except RuntimeError as e:
                print(f"Point-to-plane ICP failed: {str(e)}")
                print("Keeping the best available transformation")
                transformation_icp = current_transformation

        try:
            information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
                source, target, max_correspondence_distance_fine,
                transformation_icp)
        except RuntimeError as e:
            print(f"Failed to compute information matrix: {str(e)}")
            print("Using identity information matrix")
            information_icp = np.identity(6)

        return transformation_icp, information_icp

    def full_registration(pcds_down):
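        # One pose graph node per cloud (pose = inverse of accumulated odometry),
        # one edge per registered pair.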
        pose_graph = o3d.pipelines.registration.PoseGraph()
        odometry = np.identity(4)
        pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
        n_pcds = len(pcds_down)
        
        pairs = []
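        # Pair each cloud with its next `overlap` neighbours; with
        # quadratic_overlap, also add power-of-two offsets (2, 4, 8, ...) as
        # longer-range constraints.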
        for i in range(n_pcds - 1):
            for j in range(i + 1, min(i + overlap + 1, n_pcds)):
                pairs.append((i, j))
                if quadratic_overlap:
                    q = 2**(j-i)
                    if q > overlap and i + q < n_pcds:
                        pairs.append((i, i + q))
        
        for source_id, target_id in pairs:
            transformation_icp, information_icp = pairwise_registration(
                pcds_down[source_id], pcds_down[target_id], use_colored_icp,
                voxel_size, max_correspondence_distance_coarse, max_correspondence_distance_fine)
            print(f"Build PoseGraph: {source_id} -> {target_id}")
            
            if target_id == source_id + 1:
                # Odometry edge: accumulate the chain transform and store its
                # inverse as the new node's pose.
                odometry = np.dot(transformation_icp, odometry)
                pose_graph.nodes.append(
                    o3d.pipelines.registration.PoseGraphNode(
                        np.linalg.inv(odometry)))

            # Odometry edges are certain; longer-range (loop-closure) edges are
            # marked uncertain so global optimization can prune bad matches.
            pose_graph.edges.append(
                o3d.pipelines.registration.PoseGraphEdge(source_id,
                                                         target_id,
                                                         transformation_icp,
                                                         information_icp,
                                                         uncertain=(target_id != source_id + 1)))
        return pose_graph

    # Preprocess point clouds
    print("Preprocessing point clouds...")
    pcds_down = [preprocess_point_cloud(pcd, voxel_size) for pcd in pcds]

    print("Full registration ...")
    pose_graph = full_registration(pcds_down)
    
    print("Optimizing PoseGraph ...")
    option = o3d.pipelines.registration.GlobalOptimizationOption(
        max_correspondence_distance=max_correspondence_distance_fine,
        edge_prune_threshold=0.25,
        reference_node=0)
    
    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
        o3d.pipelines.registration.global_optimization(
            pose_graph,
            o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(),
            o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(),
            option)

    print("Transform points and combine")
    pcd_combined = o3d.geometry.PointCloud()
    for point_id in range(len(pcds)):
        print(pose_graph.nodes[point_id].pose)
        pcds[point_id].transform(pose_graph.nodes[point_id].pose)
        pcd_combined += pcds[point_id]
    
    return pcd_combined, pose_graph
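
# --- Example usage (illustrative sketch, not part of the original script) ---
# The file names below are placeholders; substitute your own clouds. Colored
# ICP assumes the inputs carry RGB colors; otherwise pass use_colored_icp=False.
if __name__ == "__main__":
    paths = ["cloud_000.pcd", "cloud_001.pcd", "cloud_002.pcd"]  # placeholder paths
    pcds = [o3d.io.read_point_cloud(p) for p in paths]
    combined, pose_graph = improved_multiway_registration(pcds, voxel_size=0.05)
    o3d.io.write_point_cloud("combined.pcd", combined)
    o3d.visualization.draw_geometries([combined])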