Registration.cpp
// ----------------------------------------------------------------------------
// -                        CloudViewer: www.cloudViewer.org                  -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2024 www.cloudViewer.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------

#include <Helper.h>
#include <Logging.h>

namespace cloudViewer {
namespace t {
namespace pipelines {
namespace registration {

static RegistrationResult ComputeRegistrationResult(
        const geometry::PointCloud &source,
        const core::nns::NearestNeighborSearch &target_nns,
        const double max_correspondence_distance,
        const core::Tensor &transformation) {
    core::AssertTensorShape(transformation, {4, 4});

    RegistrationResult result(
            transformation.To(core::Device("CPU:0"), core::Float64));

    core::Tensor distances, counts;
    std::tie(result.correspondences_, distances, counts) =
            target_nns.HybridSearch(source.GetPointPositions(),
                                    max_correspondence_distance, 1);
    result.correspondences_ = result.correspondences_.To(core::Int64);
    double num_correspondences =
            counts.Sum({0}).To(core::Float64).Item<double>();

    if (num_correspondences != 0) {
        // Reduction sum of "distances" for error.
        const double squared_error =
                distances.Sum({0}).To(core::Float64).Item<double>();

        result.fitness_ =
                num_correspondences /
                static_cast<double>(source.GetPointPositions().GetLength());
        result.inlier_rmse_ = std::sqrt(squared_error / num_correspondences);
    } else {
        // Case of no-correspondences.
        utility::LogWarning(
                "0 correspondence present between the pointclouds. Try "
                "increasing the max_correspondence_distance parameter.");
        result.fitness_ = 0.0;
        result.inlier_rmse_ = 0.0;
        result.transformation_ =
                core::Tensor::Eye(4, core::Float64, core::Device("CPU:0"));
    }
    return result;
}
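
// Note on the two metrics filled in above: with C hybrid-search
// correspondences found within `max_correspondence_distance`, N source
// points, and squared correspondence distances d_i^2,
//     fitness_     = C / N
//     inlier_rmse_ = sqrt(sum_i d_i^2 / C)
// For example, 800 correspondences out of 1000 source points with a summed
// squared error of 2.0 give fitness_ = 0.8 and inlier_rmse_ = 0.05.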

RegistrationResult EvaluateRegistration(const geometry::PointCloud &source,
                                        const geometry::PointCloud &target,
                                        double max_correspondence_distance,
                                        const core::Tensor &transformation) {
    if (!target.HasPointPositions() || !source.HasPointPositions()) {
        utility::LogError("Source and/or Target pointcloud is empty.");
    }
    core::AssertTensorDtypes(source.GetPointPositions(),
                             {core::Float64, core::Float32});
    core::AssertTensorDtype(target.GetPointPositions(),
                            source.GetPointPositions().GetDtype());
    core::AssertTensorShape(transformation, {4, 4});

    geometry::PointCloud source_transformed = source.Clone();
    source_transformed.Transform(transformation);

    core::nns::NearestNeighborSearch target_nns(target.GetPointPositions());

    bool check = target_nns.HybridIndex(max_correspondence_distance);
    if (!check) {
        utility::LogError(
                "NearestNeighborSearch::HybridSearch: Index is not set.");
    }

    return ComputeRegistrationResult(source_transformed, target_nns,
                                     max_correspondence_distance,
                                     transformation);
}
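
// Illustrative usage sketch for EvaluateRegistration. It assumes `source`
// and `target` are t::geometry::PointCloud instances on the same device with
// Float32 or Float64 positions; the 0.07 threshold is an example value only.
//
//   core::Tensor init =
//           core::Tensor::Eye(4, core::Float64, core::Device("CPU:0"));
//   RegistrationResult eval = EvaluateRegistration(
//           source, target, /*max_correspondence_distance=*/0.07, init);
//   // eval.fitness_ and eval.inlier_rmse_ describe the overlap for the given
//   // transformation; the transformation itself is not refined here.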

RegistrationResult
ICP(const geometry::PointCloud &source,
    const geometry::PointCloud &target,
    const double max_correspondence_distance,
    const core::Tensor &init_source_to_target,
    const TransformationEstimation &estimation,
    const ICPConvergenceCriteria &criteria,
    const double voxel_size,
    const std::function<void(const std::unordered_map<std::string, core::Tensor>
                                     &)> &callback_after_iteration) {
    return MultiScaleICP(source, target, {voxel_size}, {criteria},
                         {max_correspondence_distance}, init_source_to_target,
                         estimation, callback_after_iteration);
}
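
// The single-scale ICP above simply forwards to MultiScaleICP with
// one-element vectors: ICP(src, tgt, dist, init, est, crit, voxel, cb)
// behaves like MultiScaleICP(src, tgt, {voxel}, {crit}, {dist}, init, est, cb).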

static void AssertInputMultiScaleICP(
        const geometry::PointCloud &source,
        const geometry::PointCloud &target,
        const std::vector<double> &voxel_sizes,
        const std::vector<ICPConvergenceCriteria> &criterias,
        const std::vector<double> &max_correspondence_distances,
        const core::Tensor &init_source_to_target,
        const TransformationEstimation &estimation,
        const int64_t &num_iterations,
        const core::Device &device,
        const core::Dtype &dtype) {
    core::AssertTensorShape(init_source_to_target, {4, 4});

    if (!target.HasPointPositions() || !source.HasPointPositions()) {
        utility::LogError("Source and/or Target pointcloud is empty.");
    }
    core::AssertTensorDtype(target.GetPointPositions(), dtype);
    core::AssertTensorDevice(target.GetPointPositions(), device);

    if (dtype == core::Float64 && device.IsCUDA()) {
        utility::LogDebug(
                "Use Float32 pointcloud for best performance on CUDA device.");
    }
    if (!(criterias.size() == voxel_sizes.size() &&
          criterias.size() == max_correspondence_distances.size())) {
        utility::LogError(
                "Size of criterias, voxel_size, max_correspondence_distances "
                "vectors must be same.");
    }
    if (estimation.GetTransformationEstimationType() ==
                TransformationEstimationType::PointToPlane &&
        (!target.HasPointNormals())) {
        utility::LogError(
                "TransformationEstimationPointToPlane require pre-computed "
                "normal vectors for target PointCloud.");
    }

    // ColoredICP requires pre-computed color_gradients for target points.
    if (estimation.GetTransformationEstimationType() ==
        TransformationEstimationType::ColoredICP) {
        if (!target.HasPointNormals()) {
            utility::LogError(
                    "ColoredICP requires target pointcloud to have normals.");
        }
        if (!target.HasPointColors()) {
            utility::LogError(
                    "ColoredICP requires target pointcloud to have colors.");
        }
        if (!source.HasPointColors()) {
            utility::LogError(
                    "ColoredICP requires source pointcloud to have colors.");
        }
    }

    // Doppler ICP requires Doppler velocities and pre-computed directions for
    // source point cloud.
    if (estimation.GetTransformationEstimationType() ==
        TransformationEstimationType::DopplerICP) {
        if (!target.HasPointNormals()) {
            utility::LogError(
                    "DopplerICP requires target pointcloud to have normals.");
        }
        if (!source.HasPointAttr("dopplers")) {
            utility::LogError(
                    "DopplerICP requires source pointcloud to have Doppler "
                    "velocities.");
        }
        if (!source.HasPointAttr("directions")) {
            utility::LogError(
                    "DopplerICP requires source pointcloud to have "
                    "pre-computed direction vectors.");
        }
    }

    if (max_correspondence_distances[0] <= 0.0) {
        utility::LogError(
                " Max correspondence distance must be greater than 0, but"
                " got {} in scale: {}.",
                max_correspondence_distances[0], 0);
    }

    for (int64_t i = 1; i < num_iterations; ++i) {
        if (voxel_sizes[i] >= voxel_sizes[i - 1]) {
            utility::LogError(
                    " [ICP] Voxel sizes must be in strictly decreasing order.");
        }
        if (max_correspondence_distances[i] <= 0.0) {
            utility::LogError(
                    " Max correspondence distance must be greater than 0, but"
                    " got {} in scale: {}.",
                    max_correspondence_distances[i], i);
        }
    }
}
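
// Example of parameter vectors that satisfy the checks above (values are
// illustrative only): voxel_sizes = {0.05, 0.025, 0.0125} is strictly
// decreasing, max_correspondence_distances = {0.1, 0.05, 0.025} is strictly
// positive, and criterias holds one ICPConvergenceCriteria per scale, so all
// three vectors have the same size.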

static std::tuple<std::vector<t::geometry::PointCloud>,
                  std::vector<t::geometry::PointCloud>>
InitializePointCloudPyramidForMultiScaleICP(
        const geometry::PointCloud &source,
        const geometry::PointCloud &target,
        const std::vector<double> &voxel_sizes,
        const double &max_correspondence_distance,
        const TransformationEstimation &estimation,
        const int64_t &num_iterations) {
    std::vector<t::geometry::PointCloud> source_down_pyramid(num_iterations);
    std::vector<t::geometry::PointCloud> target_down_pyramid(num_iterations);

    if (voxel_sizes[num_iterations - 1] <= 0) {
        source_down_pyramid[num_iterations - 1] = source.Clone();
        target_down_pyramid[num_iterations - 1] = target;
    } else {
        source_down_pyramid[num_iterations - 1] =
                source.VoxelDownSample(voxel_sizes[num_iterations - 1]);
        target_down_pyramid[num_iterations - 1] =
                target.VoxelDownSample(voxel_sizes[num_iterations - 1]);
    }

    // Computing Color Gradients.
    if (estimation.GetTransformationEstimationType() ==
                TransformationEstimationType::ColoredICP &&
        !target.HasPointAttr("color_gradients")) {
        // `max_correspondence_distance * 2.0` or
        // `voxel_sizes[num_iterations - 1] * 4.0` is an approximation, for
        // `search_radius` in `EstimateColorGradients`. For more control /
        // performance tuning, one may compute and save the `color_gradient`
        // attribute in the target pointcloud manually by calling the function
        // `EstimateColorGradients`, before passing it to the `ICP` function.
        if (voxel_sizes[num_iterations - 1] <= 0) {
            utility::LogWarning(
                    "Use voxel size parameter, for better performance in "
                    "ColoredICP.");
            target_down_pyramid[num_iterations - 1].EstimateColorGradients(
                    30, max_correspondence_distance * 2.0);
        } else {
            target_down_pyramid[num_iterations - 1].EstimateColorGradients(
                    30, voxel_sizes[num_iterations - 1] * 4.0);
        }
    }

    for (int k = num_iterations - 2; k >= 0; k--) {
        source_down_pyramid[k] =
                source_down_pyramid[k + 1].VoxelDownSample(voxel_sizes[k]);
        target_down_pyramid[k] =
                target_down_pyramid[k + 1].VoxelDownSample(voxel_sizes[k]);
    }

    return std::make_tuple(source_down_pyramid, target_down_pyramid);
}
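
// As the in-function comment above notes, the color-gradient search radius
// used there is only an approximation. A sketch of precomputing the attribute
// manually before calling ICP / MultiScaleICP (the max_nn of 30 matches the
// value used above; the 0.05 radius and the parameter meanings are
// illustrative assumptions):
//
//   if (!target.HasPointAttr("color_gradients")) {
//       target.EstimateColorGradients(/*max_nn=*/30, /*radius=*/0.05);
//   }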

static std::tuple<RegistrationResult, int> DoSingleScaleICPIterations(
        geometry::PointCloud &source,
        const geometry::PointCloud &target,
        const core::nns::NearestNeighborSearch &target_nns,
        const ICPConvergenceCriteria &criteria,
        const double max_correspondence_distance,
        const TransformationEstimation &estimation,
        const int scale_idx,
        const int prev_iteration_count,
        const core::Device &device,
        const core::Dtype &dtype,
        const RegistrationResult &current_result,
        const std::function<void(std::unordered_map<std::string, core::Tensor>
                                         &)> &callback_after_iteration) {
    RegistrationResult result(current_result.transformation_);
    double prev_fitness = current_result.fitness_;
    double prev_inlier_rmse = current_result.inlier_rmse_;
    int iteration_count = 0;
    for (iteration_count = 0; iteration_count < criteria.max_iteration_;
         ++iteration_count) {
        // Update the results and find correspondences.
        result = ComputeRegistrationResult(
                source.GetPointPositions(), target_nns,
                max_correspondence_distance, result.transformation_);

        // No correspondences.
        if (result.fitness_ <= std::numeric_limits<double>::min()) {
            result.converged_ = false;
            result.num_iterations_ = iteration_count;
            return std::make_tuple(result,
                                   prev_iteration_count + iteration_count);
        }

        // Computing Transform between source and target, given
        // correspondences. ComputeTransformation returns {4,4} shaped
        // Float64 transformation tensor on CPU device.
        const core::Tensor update =
                estimation
                        .ComputeTransformation(
                                source, target, result.correspondences_,
                                result.transformation_, iteration_count)
                        .To(core::Float64);

        // Multiply the transform to the cumulative transformation (update).
        result.transformation_ = update.Matmul(result.transformation_);

        // Apply the transform on source pointcloud.
        source.Transform(update);

        utility::LogDebug(
                "ICP Scale #{:d} Iteration #{:d}: Fitness {:.4f}, RMSE "
                "{:.4f}",
                scale_idx, iteration_count, result.fitness_,
                result.inlier_rmse_);

        if (callback_after_iteration) {
            const core::Device host("CPU:0");

            std::unordered_map<std::string, core::Tensor> loss_attribute_map{
                    {"iteration_index",
                     core::Tensor::Init<int64_t>(prev_iteration_count +
                                                 iteration_count)},
                    {"scale_index", core::Tensor::Init<int64_t>(scale_idx)},
                    {"scale_iteration_index",
                     core::Tensor::Init<int64_t>(iteration_count)},
                    {"inlier_rmse",
                     core::Tensor::Init<double>(result.inlier_rmse_)},
                    {"fitness", core::Tensor::Init<double>(result.fitness_)},
                    {"transformation", result.transformation_.To(host)}};
            callback_after_iteration(loss_attribute_map);
        }

        // ICPConvergenceCriteria, to terminate iteration.
        if (iteration_count != 0 &&
            std::abs(prev_fitness - result.fitness_) <
                    criteria.relative_fitness_ &&
            std::abs(prev_inlier_rmse - result.inlier_rmse_) <
                    criteria.relative_rmse_) {
            result.converged_ = true;
            break;
        }
        prev_fitness = result.fitness_;
        prev_inlier_rmse = result.inlier_rmse_;
    }
    return std::make_tuple(result, prev_iteration_count + iteration_count);
}
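
// Summary of how the loop above terminates: it returns early when no
// correspondences remain (converged_ = false), breaks with converged_ = true
// once both |fitness - prev_fitness| < criteria.relative_fitness_ and
// |inlier_rmse - prev_inlier_rmse| < criteria.relative_rmse_, and otherwise
// stops after criteria.max_iteration_ iterations.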

RegistrationResult MultiScaleICP(
        const geometry::PointCloud &source,
        const geometry::PointCloud &target,
        const std::vector<double> &voxel_sizes,
        const std::vector<ICPConvergenceCriteria> &criterias,
        const std::vector<double> &max_correspondence_distances,
        const core::Tensor &init_source_to_target,
        const TransformationEstimation &estimation,
        const std::function<
                void(const std::unordered_map<std::string, core::Tensor> &)>
                &callback_after_iteration) {
    core::AssertTensorDtypes(source.GetPointPositions(),
                             {core::Float64, core::Float32});

    const core::Device device = source.GetDevice();
    const core::Dtype dtype = source.GetPointPositions().GetDtype();
    const int64_t num_scales = int64_t(criterias.size());

    // Asserting input parameters.
    AssertInputMultiScaleICP(source, target, voxel_sizes, criterias,
                             max_correspondence_distances,
                             init_source_to_target, estimation, num_scales,
                             device, dtype);

    // Initializing point-cloud by down-sampling and computing required
    // attributes.
    std::vector<t::geometry::PointCloud> source_down_pyramid(num_scales);
    std::vector<t::geometry::PointCloud> target_down_pyramid(num_scales);
    std::tie(source_down_pyramid, target_down_pyramid) =
            InitializePointCloudPyramidForMultiScaleICP(
                    source, target, voxel_sizes,
                    max_correspondence_distances[num_scales - 1], estimation,
                    num_scales);

    // Transformation tensor is always of shape {4,4}, type Float64 on CPU:0.
    core::Tensor transformation =
            init_source_to_target.To(core::Device("CPU:0"), core::Float64);
    RegistrationResult result(transformation);

    int iteration_count = 0;
    // ---- Iterating over different resolution scale START -------------------
    for (int64_t scale_idx = 0; scale_idx < num_scales; ++scale_idx) {
        source_down_pyramid[scale_idx].Transform(result.transformation_);
        // Initialize Neighbor Search.
        core::nns::NearestNeighborSearch target_nns(
                target_down_pyramid[scale_idx].GetPointPositions());
        bool check =
                target_nns.HybridIndex(max_correspondence_distances[scale_idx]);
        if (!check) {
            utility::LogError("Index is not set.");
        }

        // ICP iterations result for single scale.
        std::tie(result, iteration_count) = DoSingleScaleICPIterations(
                source_down_pyramid[scale_idx], target_down_pyramid[scale_idx],
                target_nns, criterias[scale_idx],
                max_correspondence_distances[scale_idx], estimation, scale_idx,
                iteration_count, device, dtype, result,
                callback_after_iteration);

        // To calculate final `fitness` and `inlier_rmse` for the current
        // `transformation` stored in `result`.
        if (scale_idx == num_scales - 1) {
            bool preserved_converged_flag = result.converged_;
            result = ComputeRegistrationResult(
                    source_down_pyramid[scale_idx], target_nns,
                    max_correspondence_distances[scale_idx],
                    result.transformation_);
            result.converged_ = preserved_converged_flag;
        }

        // No correspondences.
        if (result.fitness_ <= std::numeric_limits<double>::min()) {
            result.converged_ = false;
            break;
        }
    }
    // ---- Iterating over different resolution scale END --------------------

    result.num_iterations_ = iteration_count;

    return result;
}
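
// Illustrative multi-scale call sketch. The numeric values and the
// point-to-plane estimator are example assumptions (the constructor signature
// of ICPConvergenceCriteria is assumed to take relative_fitness,
// relative_rmse, max_iteration, and TransformationEstimationPointToPlane is
// assumed to be default-constructible). Per AssertInputMultiScaleICP, the
// three vectors must have equal length, voxel sizes must strictly decrease,
// and the target needs normals for point-to-plane.
//
//   std::vector<double> voxel_sizes{0.1, 0.05, 0.025};
//   std::vector<double> max_dists{0.3, 0.14, 0.07};
//   std::vector<ICPConvergenceCriteria> criterias{
//           ICPConvergenceCriteria(1e-4, 1e-4, 20),
//           ICPConvergenceCriteria(1e-5, 1e-5, 15),
//           ICPConvergenceCriteria(1e-6, 1e-6, 10)};
//   RegistrationResult reg = MultiScaleICP(
//           source, target, voxel_sizes, criterias, max_dists,
//           core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")),
//           TransformationEstimationPointToPlane(), nullptr);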

core::Tensor GetInformationMatrix(const geometry::PointCloud &source,
                                  const geometry::PointCloud &target,
                                  const double max_correspondence_distance,
                                  const core::Tensor &transformation) {
    if (!target.HasPointPositions() || !source.HasPointPositions()) {
        utility::LogError("Source and/or Target pointcloud is empty.");
    }

    core::AssertTensorDtypes(source.GetPointPositions(),
                             {core::Float64, core::Float32});

    core::AssertTensorDtype(target.GetPointPositions(),
                            source.GetPointPositions().GetDtype());
    core::AssertTensorShape(transformation, {4, 4});

    geometry::PointCloud source_transformed = source.Clone();
    source_transformed.Transform(transformation);

    core::nns::NearestNeighborSearch target_nns(target.GetPointPositions());

    target_nns.HybridIndex(max_correspondence_distance);

    core::Tensor correspondences, distances, counts;
    std::tie(correspondences, distances, counts) =
            target_nns.HybridSearch(source_transformed.GetPointPositions(),
                                    max_correspondence_distance, 1);

    correspondences = correspondences.To(core::Int64);
    int32_t num_correspondences = counts.Sum({0}).Item<int32_t>();

    if (num_correspondences == 0) {
        utility::LogError(
                "0 correspondence present between the pointclouds. Try "
                "increasing the max_correspondence_distance parameter.");
    }

    return kernel::ComputeInformationMatrix(target.GetPointPositions(),
                                            correspondences);
}
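
// The information matrix returned above is the {6, 6} Float64 tensor computed
// by kernel::ComputeInformationMatrix from the target points and the hybrid
// correspondences found within `max_correspondence_distance`. Sketch of a
// follow-up call after registration (the 0.07 threshold is an example value;
// `reg` is assumed to hold an ICP / MultiScaleICP result):
//
//   core::Tensor info = GetInformationMatrix(
//           source, target, /*max_correspondence_distance=*/0.07,
//           reg.transformation_);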

}  // namespace registration
}  // namespace pipelines
}  // namespace t
}  // namespace cloudViewer