ACloudViewer  3.9.4
A Modern Library for 3D Data Processing
PointCloud.cpp
1 // ----------------------------------------------------------------------------
2 // - CloudViewer: www.cloudViewer.org -
3 // ----------------------------------------------------------------------------
4 // Copyright (c) 2018-2024 www.cloudViewer.org
5 // SPDX-License-Identifier: MIT
6 // ----------------------------------------------------------------------------
7 
9 
10 #include <CVGeom.h>
11 #include <libqhullcpp/PointCoordinates.h>
12 #include <libqhullcpp/Qhull.h>
13 #include <libqhullcpp/QhullFacet.h>
14 #include <libqhullcpp/QhullFacetList.h>
15 #include <libqhullcpp/QhullVertexSet.h>
16 
17 #include <Eigen/Core>
18 #include <algorithm>
19 #include <limits>
20 #include <string>
21 #include <unordered_map>
22 #include <utility>
23 #include <vector>
24 
44 
45 namespace cloudViewer {
46 namespace t {
47 namespace geometry {
48 
49 PointCloud::PointCloud(const core::Device &device)
50     : Geometry(Geometry::GeometryType::PointCloud, 3),
51       device_(device),
52  point_attr_(TensorMap("positions")) {}
53 
54 PointCloud::PointCloud(const core::Tensor &points)
55     : PointCloud(points.GetDevice()) {
56     core::AssertTensorShape(points, {utility::nullopt, 3});
57     SetPointPositions(points);
58 }
59 
60 PointCloud::PointCloud(const std::unordered_map<std::string, core::Tensor>
61  &map_keys_to_tensors)
62     : Geometry(Geometry::GeometryType::PointCloud, 3),
63       point_attr_(TensorMap("positions")) {
64  if (map_keys_to_tensors.count("positions") == 0) {
65  utility::LogError("\"positions\" attribute must be specified.");
66  }
67  device_ = map_keys_to_tensors.at("positions").GetDevice();
68  core::AssertTensorShape(map_keys_to_tensors.at("positions"),
69  {utility::nullopt, 3});
70  point_attr_ = TensorMap("positions", map_keys_to_tensors.begin(),
71  map_keys_to_tensors.end());
72 }
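A minimal usage sketch for this constructor (editor's addition, not part of the file; the tensor values, Float32 dtype and LogInfo call are illustrative only):

    core::Tensor positions = core::Tensor::Init<float>(
            {{0.f, 0.f, 0.f}, {1.f, 0.f, 0.f}, {0.f, 1.f, 0.f}});
    core::Tensor colors = core::Tensor::Init<float>(
            {{1.f, 0.f, 0.f}, {0.f, 1.f, 0.f}, {0.f, 0.f, 1.f}});
    PointCloud pcd({{"positions", positions}, {"colors", colors}});
    utility::LogInfo("{}", pcd.ToString());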
73 
74 std::string PointCloud::ToString() const {
75  size_t num_points = 0;
76  std::string points_dtype_str = "";
77  if (point_attr_.count(point_attr_.GetPrimaryKey())) {
78  num_points = GetPointPositions().GetLength();
79  points_dtype_str =
80  fmt::format(" ({})", GetPointPositions().GetDtype().ToString());
81  }
82  auto str =
83  fmt::format("PointCloud on {} [{} points{}].\nAttributes:",
84  GetDevice().ToString(), num_points, points_dtype_str);
85 
86  if ((point_attr_.size() - point_attr_.count(point_attr_.GetPrimaryKey())) ==
87  0)
88  return str + " None.";
89  for (const auto &keyval : point_attr_) {
90  if (keyval.first != "positions") {
91  str += fmt::format(" {} (dtype = {}, shape = {}),", keyval.first,
92  keyval.second.GetDtype().ToString(),
93  keyval.second.GetShape().ToString());
94  }
95  }
96  str[str.size() - 1] = '.';
97  return str;
98 }
99 
100 core::Tensor PointCloud::GetMinBound() const {
101     return GetPointPositions().Min({0});
102 }
103 
104 core::Tensor PointCloud::GetMaxBound() const {
105     return GetPointPositions().Max({0});
106 }
107 
108 core::Tensor PointCloud::GetCenter() const {
109     return GetPointPositions().Mean({0});
110 }
111 
112 PointCloud PointCloud::To(const core::Device &device, bool copy) const {
113  if (!copy && GetDevice() == device) {
114  return *this;
115  }
116  PointCloud pcd(device);
117  for (auto &kv : point_attr_) {
118  pcd.SetPointAttr(kv.first, kv.second.To(device, /*copy=*/true));
119  }
120  return pcd;
121 }
122 
123 PointCloud PointCloud::Clone() const { return To(GetDevice(), /*copy=*/true); }
124 
125 PointCloud PointCloud::Append(const PointCloud &other) const {
126     PointCloud pcd(GetDevice());
127 
128  int64_t length = GetPointPositions().GetLength();
129 
130  for (auto &kv : point_attr_) {
131  if (other.HasPointAttr(kv.first)) {
132  auto other_attr = other.GetPointAttr(kv.first);
133  core::AssertTensorDtype(other_attr, kv.second.GetDtype());
134  core::AssertTensorDevice(other_attr, kv.second.GetDevice());
135 
136  // Checking shape compatibility.
137  auto other_attr_shape = other_attr.GetShape();
138  auto attr_shape = kv.second.GetShape();
139  int64_t combined_length = other_attr_shape[0] + attr_shape[0];
140  other_attr_shape[0] = combined_length;
141  attr_shape[0] = combined_length;
142  if (other_attr_shape != attr_shape) {
143             utility::LogError(
144                     "Shape mismatch. Attribute {}, shape {}, is not "
145  "compatible with {}.",
146  kv.first, other_attr.GetShape(), kv.second.GetShape());
147  }
148 
149  core::Tensor combined_attr =
150  core::Tensor::Empty(other_attr_shape, kv.second.GetDtype(),
151  kv.second.GetDevice());
152 
153  combined_attr.SetItem(core::TensorKey::Slice(0, length, 1),
154  kv.second);
155  combined_attr.SetItem(
156  core::TensorKey::Slice(length, combined_length, 1),
157  other_attr);
158 
159  pcd.SetPointAttr(kv.first, combined_attr.Clone());
160  } else {
161             utility::LogError(
162                     "The pointcloud is missing attribute {}. The pointcloud "
163  "being appended, must have all the attributes present in "
164  "the pointcloud it is being appended to.",
165  kv.first);
166  }
167  }
168  return pcd;
169 }
170 
171 PointCloud &PointCloud::Transform(const core::Tensor &transformation) {
172     core::AssertTensorShape(transformation, {4, 4});
173 
174     kernel::transform::TransformPoints(transformation, GetPointPositions());
175     if (HasPointNormals()) {
176         kernel::transform::TransformNormals(transformation, GetPointNormals());
177     }
178 
179  return *this;
180 }
181 
182 PointCloud &PointCloud::Translate(const core::Tensor &translation,
183                                   bool relative) {
184  core::AssertTensorShape(translation, {3});
185 
186  core::Tensor transform =
187  translation.To(GetDevice(), GetPointPositions().GetDtype());
188 
189  if (!relative) {
190  transform -= GetCenter();
191  }
192  GetPointPositions() += transform;
193  return *this;
194 }
195 
196 PointCloud &PointCloud::Scale(double scale, const core::Tensor &center) {
197  core::AssertTensorShape(center, {3});
198 
199  const core::Tensor center_d =
200  center.To(GetDevice(), GetPointPositions().GetDtype());
201 
202  GetPointPositions().Sub_(center_d).Mul_(scale).Add_(center_d);
203  return *this;
204 }
205 
206 PointCloud &PointCloud::Rotate(const core::Tensor &R,
207                                const core::Tensor &center) {
208  core::AssertTensorShape(R, {3, 3});
209  core::AssertTensorShape(center, {3});
210 
210 
211     kernel::transform::RotatePoints(R, GetPointPositions(), center);
212 
213     if (HasPointNormals()) {
214         kernel::transform::RotateNormals(R, GetPointNormals());
215     }
216  return *this;
217 }
218 
219 PointCloud PointCloud::SelectByMask(const core::Tensor &boolean_mask,
220                                     bool invert /* = false */) const {
221  const int64_t length = GetPointPositions().GetLength();
222     core::AssertTensorDtype(boolean_mask, core::Bool);
223     core::AssertTensorShape(boolean_mask, {length});
224  core::AssertTensorDevice(boolean_mask, GetDevice());
225 
226  core::Tensor indices_local;
227  if (invert) {
228  indices_local = boolean_mask.LogicalNot();
229  } else {
230  indices_local = boolean_mask;
231  }
232 
233  PointCloud pcd(GetDevice());
234  for (auto &kv : GetPointAttr()) {
235  if (HasPointAttr(kv.first)) {
236  pcd.SetPointAttr(kv.first, kv.second.IndexGet({indices_local}));
237  }
238  }
239 
240     utility::LogDebug("Pointcloud down sampled from {} points to {} points.",
241                       length, pcd.GetPointPositions().GetLength());
242     return pcd;
243 }
244 
245 PointCloud PointCloud::SelectByIndex(
246         const core::Tensor &indices,
247  bool invert /* = false */,
248  bool remove_duplicates /* = false */) const {
249  const int64_t length = GetPointPositions().GetLength();
252 
253  PointCloud pcd(GetDevice());
254 
255     if (!remove_duplicates && !invert) {
256         core::TensorKey key = core::TensorKey::IndexTensor(indices);
257  for (auto &kv : GetPointAttr()) {
258  if (HasPointAttr(kv.first)) {
259  pcd.SetPointAttr(kv.first, kv.second.GetItem(key));
260  }
261  }
262         utility::LogDebug(
263                 "Pointcloud down sampled from {} points to {} points.", length,
264  pcd.GetPointPositions().GetLength());
265  } else {
266         // The indices may contain duplicate values, which would result in
267         // identical point cloud attributes. We convert the indices Tensor
268         // into a mask Tensor and call SelectByMask to avoid this situation.
269         core::Tensor mask =
270                 core::Tensor::Zeros({length}, core::Bool, GetDevice());
271         mask.IndexSet({indices},
272                       core::Tensor::Init<bool>(true, GetDevice()));
273 
274  pcd = SelectByMask(mask, invert);
275  }
276 
277  return pcd;
278 }
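A short sketch of how the two selection paths compose (editor's addition; the mask construction mirrors RemoveNonFinitePoints further below, and pcd plus the index values are illustrative):

    const core::SizeVector dim = {1};
    core::Tensor finite_mask =
            pcd.GetPointPositions().IsFinite().All(dim, false);
    PointCloud clean = pcd.SelectByMask(finite_mask);
    PointCloud pair = clean.SelectByIndex(core::Tensor::Init<int64_t>({0, 1}));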
279 
280 PointCloud PointCloud::VoxelDownSample(double voxel_size,
281                                        const std::string &reduction) const {
282  if (voxel_size <= 0) {
283  utility::LogError("voxel_size must be positive.");
284  }
285  if (reduction != "mean") {
286  utility::LogError("Reduction can only be 'mean' for VoxelDownSample.");
287  }
288 
289  // Discretize voxels.
290  core::Tensor voxeld = GetPointPositions() / voxel_size;
291  core::Tensor voxeli = voxeld.Floor().To(core::Int64);
292 
293  // Map discrete voxels to indices.
294  core::HashSet voxeli_hashset(voxeli.GetLength(), core::Int64, {3}, device_);
295 
296  // Index map: (0, original_points) -> (0, unique_points).
297  core::Tensor index_map_point2voxel, masks;
298  voxeli_hashset.Insert(voxeli, index_map_point2voxel, masks);
299 
300     // Insert and Find are two different passes.
301     // In the insertion pass, -1/false is returned for points whose voxel
302     // already exists in the hash set.
303     // In the find pass, the actual bucket indices of the corresponding
304     // downsampled points are returned for every input point.
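    // Illustrative example (editor's note, not part of the original file):
    // for voxel rows [A, B, A, C], Insert() yields masks = [true, true,
    // false, true] because the third row hits an existing bucket, while the
    // subsequent Find() fills index_map_point2voxel = [iA, iB, iA, iC], so
    // every original point maps to its voxel's bucket before IndexAdd_ below.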
305  voxeli_hashset.Find(voxeli, index_map_point2voxel, masks);
306  index_map_point2voxel = index_map_point2voxel.To(core::Int64);
307 
308  int64_t num_points = voxeli.GetLength();
309  int64_t num_voxels = voxeli_hashset.Size();
310 
311  // Count the number of points in each voxel.
312  auto voxel_num_points =
313  core::Tensor::Zeros({num_voxels}, core::Float32, device_);
314  voxel_num_points.IndexAdd_(
315  /*dim*/ 0, index_map_point2voxel,
316  core::Tensor::Ones({num_points}, core::Float32, device_));
317 
318  // Create a new point cloud.
319  PointCloud pcd_down(device_);
320  for (auto &kv : point_attr_) {
321  auto point_attr = kv.second;
322 
323  std::string attr_string = kv.first;
324  auto attr_dtype = point_attr.GetDtype();
325 
326  // Use float to avoid unsupported tensor types.
327  core::SizeVector attr_shape = point_attr.GetShape();
328  attr_shape[0] = num_voxels;
329         auto voxel_attr =
330                 core::Tensor::Zeros(attr_shape, core::Float32, device_);
331         if (reduction == "mean") {
332  voxel_attr.IndexAdd_(0, index_map_point2voxel,
333  point_attr.To(core::Float32));
334  voxel_attr /= voxel_num_points.View({-1, 1});
335  voxel_attr = voxel_attr.To(attr_dtype);
336  } else {
337  utility::LogError("Unsupported reduction type {}.", reduction);
338  }
339  pcd_down.SetPointAttr(attr_string, voxel_attr);
340  }
341 
342  return pcd_down;
343 }
344 
345 PointCloud PointCloud::UniformDownSample(size_t every_k_points) const {
346  if (every_k_points == 0) {
347         utility::LogError(
348                 "Illegal sample rate, every_k_points must be larger than 0.");
349  }
350 
351  const int64_t length = GetPointPositions().GetLength();
352 
353  PointCloud pcd_down(GetDevice());
354  for (auto &kv : GetPointAttr()) {
355  pcd_down.SetPointAttr(
356  kv.first,
357  kv.second.Slice(0, 0, length, (int64_t)every_k_points));
358  }
359 
360  return pcd_down;
361 }
362 
363 PointCloud PointCloud::RandomDownSample(double sampling_ratio) const {
364     if (sampling_ratio < 0 || sampling_ratio > 1) {
365         utility::LogError(
366                 "Illegal sampling_ratio {}, sampling_ratio must be between 0 "
367                 "and 1.", sampling_ratio);
368     }
369 
370  const int64_t length = GetPointPositions().GetLength();
371  std::vector<int64_t> indices(length);
372  std::iota(std::begin(indices), std::end(indices), 0);
373  {
374  std::lock_guard<std::mutex> lock(*utility::random::GetMutex());
375         std::shuffle(indices.begin(), indices.end(),
376                      *utility::random::GetEngine());
377     }
378 
379  const int sample_size = sampling_ratio * length;
380  indices.resize(sample_size);
381     // TODO: Generating random indices on the GPU with a CUDA RNG may be more
382     // efficient than copying the index data from CPU to GPU.
383  return SelectByIndex(
384  core::Tensor(indices, {sample_size}, core::Int64, GetDevice()),
385  false, false);
386 }
387 
388 PointCloud PointCloud::FarthestPointDownSample(const size_t num_samples,
389                                                const size_t start_index) const {
390  const core::Dtype dtype = GetPointPositions().GetDtype();
391  const int64_t num_points = GetPointPositions().GetLength();
392  if (num_samples == 0) {
393  return PointCloud(GetDevice());
394  } else if (num_samples == size_t(num_points)) {
395  return Clone();
396  } else if (num_samples > size_t(num_points)) {
397         utility::LogError(
398                 "Illegal number of samples: {}, must <= point size: {}",
399  num_samples, num_points);
400  } else if (start_index >= size_t(num_points)) {
401  utility::LogError("Illegal start index: {}, must <= point size: {}",
402  start_index, num_points);
403  }
404  core::Tensor selection_mask =
405  core::Tensor::Zeros({num_points}, core::Bool, GetDevice());
406  core::Tensor smallest_distances = core::Tensor::Full(
407  {num_points}, std::numeric_limits<double>::infinity(), dtype,
408  GetDevice());
409 
410  int64_t farthest_index = static_cast<int64_t>(start_index);
411 
412  for (size_t i = 0; i < num_samples; i++) {
413  selection_mask[farthest_index] = true;
414  core::Tensor selected = GetPointPositions()[farthest_index];
415 
416  core::Tensor diff = GetPointPositions() - selected;
417  core::Tensor distances_to_selected = (diff * diff).Sum({1});
418  smallest_distances = cloudViewer::core::Minimum(distances_to_selected,
419  smallest_distances);
420 
421  farthest_index = smallest_distances.ArgMax({0}).Item<int64_t>();
422  }
423  return SelectByMask(selection_mask);
424 }
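The four downsampling flavours above can be compared side by side; a sketch (editor's addition; pcd and the parameter values are illustrative, and VoxelDownSample falls back to its default "mean" reduction):

    PointCloud by_voxel = pcd.VoxelDownSample(0.05);          // one point per 5 cm voxel
    PointCloud by_stride = pcd.UniformDownSample(5);          // keep every 5th point
    PointCloud by_ratio = pcd.RandomDownSample(0.1);          // keep ~10% of the points
    PointCloud by_spread = pcd.FarthestPointDownSample(100);  // 100 well-separated points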
425 
426 std::tuple<PointCloud, core::Tensor> PointCloud::RemoveRadiusOutliers(
427  size_t nb_points, double search_radius) const {
428  if (nb_points < 1 || search_radius <= 0) {
429         utility::LogError(
430                 "Illegal input parameters, number of points and radius must be "
431  "positive");
432  }
433     core::nns::NearestNeighborSearch target_nns(GetPointPositions());
434 
435  const bool check = target_nns.FixedRadiusIndex(search_radius);
436  if (!check) {
437  utility::LogError("Fixed radius search index is not set.");
438  }
439 
440  core::Tensor indices, distance, row_splits;
441  std::tie(indices, distance, row_splits) = target_nns.FixedRadiusSearch(
442  GetPointPositions(), search_radius, false);
443  row_splits = row_splits.To(GetDevice());
444 
445  const int64_t size = row_splits.GetLength();
446  const core::Tensor num_neighbors =
447  row_splits.Slice(0, 1, size) - row_splits.Slice(0, 0, size - 1);
448 
449  const core::Tensor valid =
450  num_neighbors.Ge(static_cast<int64_t>(nb_points));
451  return std::make_tuple(SelectByMask(valid), valid);
452 }
453 
454 std::tuple<PointCloud, core::Tensor> PointCloud::RemoveStatisticalOutliers(
455  size_t nb_neighbors, double std_ratio) const {
456  if (nb_neighbors < 1 || std_ratio <= 0) {
457         utility::LogError(
458                 "Illegal input parameters, the number of neighbors and "
459  "standard deviation ratio must be positive.");
460  }
461  if (GetPointPositions().GetLength() == 0) {
462  return std::make_tuple(PointCloud(GetDevice()),
464  }
465 
466     core::nns::NearestNeighborSearch nns(GetPointPositions());
467     const bool check = nns.KnnIndex();
468  if (!check) {
469  utility::LogError("Knn search index is not set.");
470  }
471 
472  core::Tensor indices, distance2;
473  std::tie(indices, distance2) =
474  nns.KnnSearch(GetPointPositions(), nb_neighbors);
475 
476  core::Tensor avg_distances = distance2.Sqrt().Mean({1});
477  const double cloud_mean =
478  avg_distances.Mean({0}).To(core::Float64).Item<double>();
479  const core::Tensor std_distances_centered = avg_distances - cloud_mean;
480  const double sq_sum = (std_distances_centered * std_distances_centered)
481  .Sum({0})
482  .To(core::Float64)
483  .Item<double>();
484  const double std_dev =
485  std::sqrt(sq_sum / (avg_distances.GetShape()[0] - 1));
486  const double distance_threshold = cloud_mean + std_ratio * std_dev;
487  const core::Tensor valid = avg_distances.Le(distance_threshold);
488 
489  return std::make_tuple(SelectByMask(valid), valid);
490 }
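The two outlier filters above are typically chained; a sketch (editor's addition; pcd and the parameters are illustrative), where each call also returns the boolean mask of the points that were kept:

    PointCloud filtered;
    core::Tensor kept;
    std::tie(filtered, kept) = pcd.RemoveRadiusOutliers(16, 0.05);
    std::tie(filtered, kept) = filtered.RemoveStatisticalOutliers(20, 2.0);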
491 
492 std::tuple<PointCloud, core::Tensor> PointCloud::RemoveNonFinitePoints(
493  bool remove_nan, bool remove_inf) const {
494  core::Tensor finite_indices_mask;
495  const core::SizeVector dim = {1};
496  if (remove_nan && remove_inf) {
497  finite_indices_mask =
498  this->GetPointPositions().IsFinite().All(dim, false);
499  } else if (remove_nan) {
500  finite_indices_mask =
501  this->GetPointPositions().IsNan().LogicalNot().All(dim, false);
502  } else if (remove_inf) {
503  finite_indices_mask =
504  this->GetPointPositions().IsInf().LogicalNot().All(dim, false);
505  } else {
506  finite_indices_mask = core::Tensor::Full(
507  {this->GetPointPositions().GetLength()}, true, core::Bool,
508  this->GetPointPositions().GetDevice());
509  }
510 
511  utility::LogDebug("Removing non-finite points.");
512  return std::make_tuple(SelectByMask(finite_indices_mask),
513  finite_indices_mask);
514 }
515 
516 std::tuple<PointCloud, core::Tensor> PointCloud::RemoveDuplicatedPoints()
517  const {
518  core::Tensor points_voxeli;
519  const core::Dtype dtype = GetPointPositions().GetDtype();
520  if (dtype.ByteSize() == 4) {
521  points_voxeli = GetPointPositions().ReinterpretCast(core::Int32);
522  } else if (dtype.ByteSize() == 8) {
523  points_voxeli = GetPointPositions().ReinterpretCast(core::Int64);
524  } else {
525         utility::LogError(
526                 "Unsupported point position data-type. Only support "
527  "Int32, Int64, Float32 and Float64.");
528  }
529 
530  core::HashSet points_voxeli_hashset(points_voxeli.GetLength(),
531  points_voxeli.GetDtype(), {3}, device_);
532  core::Tensor buf_indices, masks;
533  points_voxeli_hashset.Insert(points_voxeli, buf_indices, masks);
534 
535  return std::make_tuple(SelectByMask(masks), masks);
536 }
537 
538 PointCloud &PointCloud::NormalizeNormals() {
539     if (!HasPointNormals()) {
540  utility::LogWarning("PointCloud has no normals.");
541  return *this;
542  } else {
543  SetPointNormals(GetPointNormals().Contiguous());
544  }
545 
546     core::Tensor &normals = GetPointNormals();
547     if (IsCPU()) {
548         kernel::pointcloud::NormalizeNormalsCPU(normals);
549     } else if (IsCUDA()) {
550  CUDA_CALL(kernel::pointcloud::NormalizeNormalsCUDA, normals);
551  } else {
552  utility::LogError("Unimplemented device");
553  }
554 
555  return *this;
556 }
557 
558 PointCloud &PointCloud::PaintUniformColor(const core::Tensor &color) {
559     core::AssertTensorShape(color, {3});
560     core::Tensor clipped_color = color.To(GetDevice());
561  if (color.GetDtype() == core::Float32 ||
562  color.GetDtype() == core::Float64) {
563  clipped_color = clipped_color.Clip(0.0f, 1.0f);
564  }
565  core::Tensor pcd_colors =
566             core::Tensor::Empty({GetPointPositions().GetLength(), 3},
567                                 clipped_color.GetDtype(), GetDevice());
568  pcd_colors.AsRvalue() = clipped_color;
569  SetPointColors(pcd_colors);
570 
571  return *this;
572 }
573 
574 std::tuple<PointCloud, core::Tensor> PointCloud::ComputeBoundaryPoints(
575  double radius, int max_nn, double angle_threshold) const {
578  if (!HasPointNormals()) {
579         utility::LogError(
580                 "PointCloud must have normals attribute to compute boundary "
581  "points.");
582  }
583 
584  const core::Device device = GetDevice();
585  const int64_t num_points = GetPointPositions().GetLength();
586 
587  const core::Tensor points_d = GetPointPositions().Contiguous();
588  const core::Tensor normals_d = GetPointNormals().Contiguous();
589 
590  // Compute nearest neighbors.
591  core::Tensor indices, distance2, counts;
592     core::nns::NearestNeighborSearch tree(points_d);
593 
594  bool check = tree.HybridIndex(radius);
595  if (!check) {
596  utility::LogError("Building HybridIndex failed.");
597  }
598  std::tie(indices, distance2, counts) =
599  tree.HybridSearch(points_d, radius, max_nn);
600     utility::LogDebug(
601             "Use HybridSearch [max_nn: {} | radius {}] for computing "
602  "boundary points.",
603  max_nn, radius);
604 
605  core::Tensor mask = core::Tensor::Zeros({num_points}, core::Bool, device);
606  if (IsCPU()) {
607         kernel::pointcloud::ComputeBoundaryPointsCPU(
608                 points_d, normals_d, indices, counts, mask, angle_threshold);
609  } else if (IsCUDA()) {
610  CUDA_CALL(kernel::pointcloud::ComputeBoundaryPointsCUDA, points_d,
611  normals_d, indices, counts, mask, angle_threshold);
612  } else {
613  utility::LogError("Unimplemented device");
614  }
615 
616  return std::make_tuple(SelectByMask(mask), mask);
617 }
618 
619 void PointCloud::EstimateNormals(
620         const utility::optional<int> max_knn /* = 30*/,
621  const utility::optional<double> radius /*= utility::nullopt*/) {
624 
625  const core::Dtype dtype = this->GetPointPositions().GetDtype();
626  const core::Device device = GetDevice();
627 
628  const bool has_normals = HasPointNormals();
629 
630  if (!has_normals) {
631         this->SetPointNormals(core::Tensor::Empty(
632                 {GetPointPositions().GetLength(), 3}, dtype, device));
633  } else {
634         core::AssertTensorDtype(this->GetPointNormals(), dtype);
635 
636  this->SetPointNormals(GetPointNormals().Contiguous());
637  }
638 
639  this->SetPointAttr(
640  "covariances",
641             core::Tensor::Empty({GetPointPositions().GetLength(), 3, 3}, dtype,
642                                 device));
643 
644  if (radius.has_value() && max_knn.has_value()) {
645  utility::LogDebug("Using Hybrid Search for computing covariances");
646  // Computes and sets `covariances` attribute using Hybrid Search
647  // method.
648  if (IsCPU()) {
649             kernel::pointcloud::EstimateCovariancesUsingHybridSearchCPU(
650                     this->GetPointPositions().Contiguous(),
651  this->GetPointAttr("covariances"), radius.value(),
652  max_knn.value());
653  } else if (IsCUDA()) {
654  CUDA_CALL(kernel::pointcloud::
655  EstimateCovariancesUsingHybridSearchCUDA,
656  this->GetPointPositions().Contiguous(),
657  this->GetPointAttr("covariances"), radius.value(),
658  max_knn.value());
659  } else {
660  utility::LogError("Unimplemented device");
661  }
662  } else if (max_knn.has_value() && !radius.has_value()) {
663  utility::LogDebug("Using KNN Search for computing covariances");
664  // Computes and sets `covariances` attribute using KNN Search method.
665  if (IsCPU()) {
666             kernel::pointcloud::EstimateCovariancesUsingKNNSearchCPU(
667                     this->GetPointPositions().Contiguous(),
668  this->GetPointAttr("covariances"), max_knn.value());
669  } else if (IsCUDA()) {
670  CUDA_CALL(kernel::pointcloud::EstimateCovariancesUsingKNNSearchCUDA,
671  this->GetPointPositions().Contiguous(),
672  this->GetPointAttr("covariances"), max_knn.value());
673  } else {
674  utility::LogError("Unimplemented device");
675  }
676  } else if (!max_knn.has_value() && radius.has_value()) {
677  utility::LogDebug("Using Radius Search for computing covariances");
678         // Computes and sets `covariances` attribute using Radius Search method.
679  if (IsCPU()) {
680             kernel::pointcloud::EstimateCovariancesUsingRadiusSearchCPU(
681                     this->GetPointPositions().Contiguous(),
682  this->GetPointAttr("covariances"), radius.value());
683  } else if (IsCUDA()) {
684  CUDA_CALL(kernel::pointcloud::
685  EstimateCovariancesUsingRadiusSearchCUDA,
686  this->GetPointPositions().Contiguous(),
687  this->GetPointAttr("covariances"), radius.value());
688  } else {
689  utility::LogError("Unimplemented device");
690  }
691  } else {
692  utility::LogError("Both max_nn and radius are none.");
693  }
694 
695  // Estimate `normal` of each point using its `covariance` matrix.
696  if (IsCPU()) {
697         kernel::pointcloud::EstimateNormalsFromCovariancesCPU(
698                 this->GetPointAttr("covariances"), this->GetPointNormals(),
699  has_normals);
700  } else if (IsCUDA()) {
701  CUDA_CALL(kernel::pointcloud::EstimateNormalsFromCovariancesCUDA,
702  this->GetPointAttr("covariances"), this->GetPointNormals(),
703  has_normals);
704  } else {
705  utility::LogError("Unimplemented device");
706  }
707 
708  // TODO (@rishabh): Don't remove covariances attribute, when
709  // EstimateCovariance functionality is exposed.
710  RemovePointAttr("covariances");
711 }
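A typical normal-estimation sequence using the functions above and below (editor's addition; pcd and the max_knn/radius values are illustrative, and the camera-location overload relies on its declared default at the origin):

    pcd.EstimateNormals(30, 0.1);              // hybrid search: max_knn = 30, radius = 0.1
    pcd.OrientNormalsTowardsCameraLocation();  // orient towards the default origin
    pcd.NormalizeNormals();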
712 
713 void PointCloud::OrientNormalsToAlignWithDirection(
714         const core::Tensor &orientation_reference) {
715  core::AssertTensorDevice(orientation_reference, GetDevice());
716  core::AssertTensorShape(orientation_reference, {3});
717 
718  if (!HasPointNormals()) {
719         utility::LogError(
720                 "No normals in the PointCloud. Call EstimateNormals() first.");
721  } else {
722  SetPointNormals(GetPointNormals().Contiguous());
723  }
724 
725  core::Tensor reference =
726  orientation_reference.To(GetPointPositions().GetDtype());
727 
728     core::Tensor &normals = GetPointNormals();
729     if (IsCPU()) {
730         kernel::pointcloud::OrientNormalsToAlignWithDirectionCPU(normals,
731                                                                  reference);
732  } else if (IsCUDA()) {
733  CUDA_CALL(kernel::pointcloud::OrientNormalsToAlignWithDirectionCUDA,
734  normals, reference);
735  } else {
736  utility::LogError("Unimplemented device");
737  }
738 }
739 
740 void PointCloud::OrientNormalsTowardsCameraLocation(
741         const core::Tensor &camera_location) {
742  core::AssertTensorDevice(camera_location, GetDevice());
743  core::AssertTensorShape(camera_location, {3});
744 
745  if (!HasPointNormals()) {
746         utility::LogError(
747                 "No normals in the PointCloud. Call EstimateNormals() first.");
748  } else {
749  SetPointNormals(GetPointNormals().Contiguous());
750  }
751 
752  core::Tensor reference = camera_location.To(GetPointPositions().GetDtype());
753 
754     core::Tensor &normals = GetPointNormals();
755     if (IsCPU()) {
756         kernel::pointcloud::OrientNormalsTowardsCameraLocationCPU(
757                 GetPointPositions().Contiguous(), normals, reference);
758  } else if (IsCUDA()) {
759  CUDA_CALL(kernel::pointcloud::OrientNormalsTowardsCameraLocationCUDA,
760  GetPointPositions().Contiguous(), normals, reference);
761  } else {
762  utility::LogError("Unimplemented device");
763  }
764 }
765 
766 void PointCloud::OrientNormalsConsistentTangentPlane(
767         size_t k,
768  const double lambda /* = 0.0*/,
769  const double cos_alpha_tol /* = 1.0*/) {
770  // Fallback: legacy API not available in this fork; keep normals as-is.
771  (void)k;
772  (void)lambda;
773  (void)cos_alpha_tol;
774  if (!HasPointNormals()) {
775         utility::LogError(
776                 "OrientNormalsConsistentTangentPlane: normals are missing; "
777  "call EstimateNormals() first.");
778  }
779 }
780 
781 void PointCloud::EstimateColorGradients(
782         const utility::optional<int> max_knn /* = 30*/,
783  const utility::optional<double> radius /*= utility::nullopt*/) {
784  if (!HasPointColors() || !HasPointNormals()) {
785         utility::LogError(
786                 "PointCloud must have colors and normals attribute "
787  "to compute color gradients.");
788  }
791 
792  const core::Dtype dtype = this->GetPointColors().GetDtype();
793  const core::Device device = GetDevice();
794 
795  if (!this->HasPointAttr("color_gradients")) {
796  this->SetPointAttr(
797  "color_gradients",
798                 core::Tensor::Empty({GetPointPositions().GetLength(), 3}, dtype,
799                                     device));
800  } else {
801  if (this->GetPointAttr("color_gradients").GetDtype() != dtype) {
802             utility::LogError(
803                     "color_gradients attribute must be of same dtype as "
804  "points.");
805  }
806  // If color_gradients attribute is already present, do not re-compute.
807  return;
808  }
809 
810  // Compute and set `color_gradients` attribute.
811  if (radius.has_value() && max_knn.has_value()) {
812  utility::LogDebug("Using Hybrid Search for computing color_gradients");
813  if (IsCPU()) {
814             kernel::pointcloud::EstimateColorGradientsUsingHybridSearchCPU(
815                     this->GetPointPositions().Contiguous(),
816  this->GetPointNormals().Contiguous(),
817  this->GetPointColors().Contiguous(),
818  this->GetPointAttr("color_gradients"), radius.value(),
819  max_knn.value());
820  } else if (IsCUDA()) {
821  CUDA_CALL(kernel::pointcloud::
822  EstimateColorGradientsUsingHybridSearchCUDA,
823  this->GetPointPositions().Contiguous(),
824  this->GetPointNormals().Contiguous(),
825  this->GetPointColors().Contiguous(),
826  this->GetPointAttr("color_gradients"), radius.value(),
827  max_knn.value());
828  } else {
829  utility::LogError("Unimplemented device");
830  }
831  } else if (max_knn.has_value() && !radius.has_value()) {
832  utility::LogDebug("Using KNN Search for computing color_gradients");
833  if (IsCPU()) {
834             kernel::pointcloud::EstimateColorGradientsUsingKNNSearchCPU(
835                     this->GetPointPositions().Contiguous(),
836  this->GetPointNormals().Contiguous(),
837  this->GetPointColors().Contiguous(),
838  this->GetPointAttr("color_gradients"), max_knn.value());
839  } else if (IsCUDA()) {
840  CUDA_CALL(kernel::pointcloud::
841  EstimateColorGradientsUsingKNNSearchCUDA,
842  this->GetPointPositions().Contiguous(),
843  this->GetPointNormals().Contiguous(),
844  this->GetPointColors().Contiguous(),
845  this->GetPointAttr("color_gradients"), max_knn.value());
846  } else {
847  utility::LogError("Unimplemented device");
848  }
849  } else if (!max_knn.has_value() && radius.has_value()) {
850  utility::LogDebug("Using Radius Search for computing color_gradients");
851  if (IsCPU()) {
852             kernel::pointcloud::EstimateColorGradientsUsingRadiusSearchCPU(
853                     this->GetPointPositions().Contiguous(),
854  this->GetPointNormals().Contiguous(),
855  this->GetPointColors().Contiguous(),
856  this->GetPointAttr("color_gradients"), radius.value());
857  } else if (IsCUDA()) {
858  CUDA_CALL(kernel::pointcloud::
859  EstimateColorGradientsUsingRadiusSearchCUDA,
860  this->GetPointPositions().Contiguous(),
861  this->GetPointNormals().Contiguous(),
862  this->GetPointColors().Contiguous(),
863  this->GetPointAttr("color_gradients"), radius.value());
864  } else {
865  utility::LogError("Unimplemented device");
866  }
867  } else {
868  utility::LogError("Both max_nn and radius are none.");
869  }
870 }
871 
872 static PointCloud CreatePointCloudWithNormals(
873         const Image &depth_in, /* UInt16 or Float32 */
874  const Image &color_in, /* Float32 */
875  const core::Tensor &intrinsics_in,
876  const core::Tensor &extrinsics,
877  float depth_scale,
878  float depth_max,
879  int stride) {
880  using core::None;
881  using core::Tensor;
882  using core::TensorKey;
883  const float invalid_fill = NAN;
884  // Filter defaults for depth processing
885  const int bilateral_kernel_size = // bilateral filter defaults for backends
886  depth_in.IsCUDA() ? 3 : 5;
887  const float depth_diff_threshold = 0.14f;
888  const float bilateral_value_sigma = 10.f;
889  const float bilateral_distance_sigma = 10.f;
890  if ((stride & (stride - 1)) != 0) {
891         utility::LogError(
892                 "Only powers of 2 are supported for stride when "
893  "with_normals=true.");
894  }
895  if (color_in.GetRows() > 0 && (depth_in.GetRows() != color_in.GetRows() ||
896  depth_in.GetCols() != color_in.GetCols())) {
897  utility::LogError("Depth and color images have different sizes.");
898  }
899  auto depth =
900  depth_in.ClipTransform(depth_scale, 0.01f, depth_max, invalid_fill);
901  auto color = color_in;
902  auto intrinsics = intrinsics_in / stride;
903  intrinsics[-1][-1] = 1.f;
904  if (stride == 1) {
905  depth = depth.FilterBilateral(bilateral_kernel_size,
906  bilateral_value_sigma,
907  bilateral_distance_sigma);
908  } else {
909  for (; stride > 1; stride /= 2) {
910  depth = depth.PyrDownDepth(depth_diff_threshold, invalid_fill);
911  color = color.PyrDown();
912  }
913  }
914  const int64_t im_size = depth.GetRows() * depth.GetCols();
915  auto vertex_map = depth.CreateVertexMap(intrinsics, invalid_fill);
916  auto vertex_list = vertex_map.AsTensor().View({im_size, 3});
917  if (!extrinsics.AllClose(Tensor::Eye(4, extrinsics.GetDtype(),
918  extrinsics.GetDevice()))) {
919  auto cam_to_world = extrinsics.Inverse();
920  vertex_list = vertex_list
921  .Matmul(cam_to_world.Slice(0, 0, 3, 1)
922  .Slice(1, 0, 3, 1)
923  .T())
924  .Add_(cam_to_world.Slice(0, 0, 3, 1)
925  .Slice(1, 3, 4, 1)
926  .T());
927  vertex_map = Image(vertex_list.View(vertex_map.AsTensor().GetShape())
928  .Contiguous());
929  }
930  auto normal_map_t = vertex_map.CreateNormalMap(invalid_fill)
931  .AsTensor()
932  .View({im_size, 3});
933  // all columns are the same
934  auto valid_idx =
935  normal_map_t.Slice(1, 0, 1, 1)
936  .IsFinite()
937  .LogicalAnd(vertex_list.Slice(1, 0, 1, 1).IsFinite())
938  .Reshape({im_size});
939  PointCloud pcd(
940  {{"positions",
941  vertex_list.GetItem({TensorKey::IndexTensor(valid_idx),
942  TensorKey::Slice(None, None, None)})},
943  {"normals",
944  normal_map_t.GetItem({TensorKey::IndexTensor(valid_idx),
945  TensorKey::Slice(None, None, None)})}});
946  if (color.GetRows() > 0) {
947  pcd.SetPointColors(
948  color.AsTensor()
949  .View({im_size, 3})
950  .GetItem({TensorKey::IndexTensor(valid_idx),
951  TensorKey::Slice(None, None, None)}));
952  }
953  return pcd;
954 }
955 
956 PointCloud PointCloud::CreateFromDepthImage(const Image &depth,
957                                             const core::Tensor &intrinsics,
958  const core::Tensor &extrinsics,
959  float depth_scale,
960  float depth_max,
961  int stride,
962  bool with_normals) {
963  core::AssertTensorDtypes(depth.AsTensor(), {core::UInt16, core::Float32});
964  core::AssertTensorShape(intrinsics, {3, 3});
965  core::AssertTensorShape(extrinsics, {4, 4});
966 
967  core::Tensor intrinsics_host =
968  intrinsics.To(core::Device("CPU:0"), core::Float64);
969  core::Tensor extrinsics_host =
970  extrinsics.To(core::Device("CPU:0"), core::Float64);
971 
972  if (with_normals) {
973  return CreatePointCloudWithNormals(depth, Image(), intrinsics_host,
974  extrinsics_host, depth_scale,
975  depth_max, stride);
976  } else {
977         core::Tensor points;
978         kernel::pointcloud::Unproject(depth.AsTensor(), utility::nullopt,
979                                       points, utility::nullopt, intrinsics_host,
980  extrinsics_host, depth_scale, depth_max,
981  stride);
982  return PointCloud(points);
983  }
984 }
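A back-projection sketch for this factory (editor's addition; depth_image is a hypothetical UInt16 depth Image in millimetres and the pinhole intrinsics are illustrative):

    core::Tensor K = core::Tensor::Init<float>({{525.f, 0.f, 319.5f},
                                                {0.f, 525.f, 239.5f},
                                                {0.f, 0.f, 1.f}});
    PointCloud from_depth = PointCloud::CreateFromDepthImage(
            depth_image, K,
            core::Tensor::Eye(4, core::Float32, core::Device("CPU:0")),
            /*depth_scale=*/1000.f, /*depth_max=*/3.f, /*stride=*/1,
            /*with_normals=*/false);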
985 
986 PointCloud PointCloud::CreateFromRGBDImage(const RGBDImage &rgbd_image,
987                                            const core::Tensor &intrinsics,
988  const core::Tensor &extrinsics,
989  float depth_scale,
990  float depth_max,
991  int stride,
992  bool with_normals) {
993     core::AssertTensorDtypes(rgbd_image.depth_.AsTensor(),
994                              {core::UInt16, core::Float32});
995  core::AssertTensorShape(intrinsics, {3, 3});
996  core::AssertTensorShape(extrinsics, {4, 4});
997 
998  Image image_colors = rgbd_image.color_.To(core::Float32, /*copy=*/false);
999 
1000  if (with_normals) {
1001  return CreatePointCloudWithNormals(rgbd_image.depth_, image_colors,
1002  intrinsics, extrinsics, depth_scale,
1003  depth_max, stride);
1004  } else {
1005  core::Tensor points, colors, image_colors_t = image_colors.AsTensor();
1006         kernel::pointcloud::Unproject(
1007                 rgbd_image.depth_.AsTensor(), image_colors_t, points, colors,
1008  intrinsics, extrinsics, depth_scale, depth_max, stride);
1009  return PointCloud({{"positions", points}, {"colors", colors}});
1010  }
1011 }
1012 
1013 geometry::Image PointCloud::ProjectToDepthImage(int width,
1014                                                 int height,
1015  const core::Tensor &intrinsics,
1016  const core::Tensor &extrinsics,
1017  float depth_scale,
1018  float depth_max) {
1019  if (!HasPointPositions()) {
1020         utility::LogWarning(
1021                 "Called ProjectToDepthImage on a point cloud with no Positions "
1022  "attribute. Returning empty image.");
1023  return geometry::Image();
1024  }
1025  core::AssertTensorShape(intrinsics, {3, 3});
1026  core::AssertTensorShape(extrinsics, {4, 4});
1027 
1028  core::Tensor depth =
1029             core::Tensor::Zeros({height, width, 1}, core::Float32, device_);
1030     kernel::pointcloud::Project(depth, utility::nullopt, GetPointPositions(),
1031                                 utility::nullopt, intrinsics, extrinsics,
1032  depth_scale, depth_max);
1033  return geometry::Image(depth);
1034 }
1035 
1036 geometry::RGBDImage PointCloud::ProjectToRGBDImage(
1037         int width,
1038  int height,
1039  const core::Tensor &intrinsics,
1040  const core::Tensor &extrinsics,
1041  float depth_scale,
1042  float depth_max) {
1043  if (!HasPointPositions()) {
1044         utility::LogWarning(
1045                 "Called ProjectToRGBDImage on a point cloud with no Positions "
1046  "attribute. Returning empty image.");
1047  return geometry::RGBDImage();
1048  }
1049  if (!HasPointColors()) {
1050         utility::LogError(
1051                 "Unable to project to RGBD without the Color attribute in the "
1052  "point cloud.");
1053  }
1054  core::AssertTensorShape(intrinsics, {3, 3});
1055  core::AssertTensorShape(extrinsics, {4, 4});
1056 
1057  core::Tensor depth =
1058             core::Tensor::Zeros({height, width, 1}, core::Float32, device_);
1059     core::Tensor color =
1060             core::Tensor::Zeros({height, width, 3}, core::Float32, device_);
1061 
1062  // Assume point colors are Float32 for kernel dispatch
1063  core::Tensor point_colors = GetPointColors();
1064  if (point_colors.GetDtype() == core::Dtype::UInt8) {
1065  point_colors = point_colors.To(core::Dtype::Float32) / 255.0;
1066  }
1067 
1068  kernel::pointcloud::Project(depth, color, GetPointPositions(), point_colors,
1069  intrinsics, extrinsics, depth_scale, depth_max);
1070 
1071  return geometry::RGBDImage(color, depth);
1072 }
1073 
1074 geometry::PointCloud PointCloud::FromLegacy(
1075         const cloudViewer::geometry::PointCloud &pcd_legacy,
1076  core::Dtype dtype,
1077  const core::Device &device) {
1078  geometry::PointCloud pcd(device);
1079  // Positions
1080  if (pcd_legacy.hasPoints()) {
1081  pcd.SetPointPositions(
1082                 core::eigen_converter::EigenVector3dVectorToTensor(
1083                         pcd_legacy.getEigenPoints(), dtype, device));
1084  } else {
1085  utility::LogWarning("Creating from an empty legacy PointCloud.");
1086  }
1087  // Normals (optional)
1088  if (pcd_legacy.hasNormals()) {
1089         pcd.SetPointNormals(core::eigen_converter::EigenVector3dVectorToTensor(
1090                 pcd_legacy.getEigenNormals(), dtype, device));
1091  }
1092  if (pcd_legacy.hasColors()) {
1093         pcd.SetPointColors(core::eigen_converter::EigenVector3dVectorToTensor(
1094                 pcd_legacy.getEigenColors(), dtype, device));
1095  }
1096  return pcd;
1097 }
1098 
1099 cloudViewer::geometry::PointCloud PointCloud::ToLegacy() const {
1100     cloudViewer::geometry::PointCloud pcd_legacy;
1101     // Positions
1102  if (HasPointPositions()) {
1103  pcd_legacy.reserveThePointsTable(
1104  static_cast<unsigned>(GetPointPositions().GetLength()));
1106  GetPointPositions()));
1107  }
1108  // Normals (optional)
1109  if (HasPointNormals()) {
1110  pcd_legacy.reserveTheNormsTable();
1111  pcd_legacy.addEigenNorms(
1112                 core::eigen_converter::TensorToEigenVector3dVector(
1113                         GetPointNormals()));
1114  }
1115 
1116  // Colors (optional)
1117  if (HasPointColors()) {
1118  bool dtype_is_supported_for_conversion = true;
1119  double normalization_factor = 1.0;
1120  core::Dtype point_color_dtype = GetPointColors().GetDtype();
1121 
1122  if (point_color_dtype == core::UInt8) {
1123  normalization_factor =
1124  1.0 /
1125  static_cast<double>(std::numeric_limits<uint8_t>::max());
1126  } else if (point_color_dtype == core::UInt16) {
1127  normalization_factor =
1128  1.0 /
1129  static_cast<double>(std::numeric_limits<uint16_t>::max());
1130  } else if (point_color_dtype != core::Float32 &&
1131  point_color_dtype != core::Float64) {
1132             utility::LogWarning(
1133                     "Dtype {} of color attribute is not supported for "
1134  "conversion to LegacyPointCloud and will be skipped. "
1135                     "Supported dtypes include UInt8, UInt16, Float32, and "
1136  "Float64",
1137  point_color_dtype.ToString());
1138  dtype_is_supported_for_conversion = false;
1139  }
1140 
1141  if (dtype_is_supported_for_conversion) {
1142  if (normalization_factor != 1.0) {
1143  core::Tensor rescaled_colors =
1144                         GetPointColors().To(core::Float64) *
1145                         normalization_factor;
1146  pcd_legacy.reserveTheRGBTable();
1147  pcd_legacy.addEigenColors(
1148                         core::eigen_converter::TensorToEigenVector3dVector(
1149                                 rescaled_colors));
1150  } else {
1151  pcd_legacy.reserveTheRGBTable();
1152  pcd_legacy.addEigenColors(
1153                         core::eigen_converter::TensorToEigenVector3dVector(
1154                                 GetPointColors()));
1155  }
1156  }
1157  }
1158  return pcd_legacy;
1159 }
1160 
1161 std::tuple<TriangleMesh, core::Tensor> PointCloud::HiddenPointRemoval(
1162  const core::Tensor &camera_location, double radius) const {
1163  core::AssertTensorShape(camera_location, {3});
1164  core::AssertTensorDevice(camera_location, GetDevice());
1165  (void)radius;
1166  // Fallback: return identity index map and an empty mesh.
1167  const int64_t n = HasPointPositions() ? GetPointPositions().GetLength() : 0;
1168  std::vector<int64_t> indices(n);
1169  std::iota(indices.begin(), indices.end(), 0);
1170  return std::make_tuple(TriangleMesh(GetDevice()),
1171  core::Tensor(std::move(indices)).To(GetDevice()));
1172 }
1173 
1175  size_t min_points,
1176  bool print_progress) const {
1177  (void)eps;
1178  (void)min_points;
1179  (void)print_progress;
1180  const int64_t n = HasPointPositions() ? GetPointPositions().GetLength() : 0;
1181  // Fallback: assign all points to label -1 (noise)
1182  std::vector<int32_t> labels(static_cast<size_t>(n), -1);
1183  return core::Tensor(std::move(labels)).To(GetDevice());
1184 }
1185 
1186 std::tuple<core::Tensor, core::Tensor> PointCloud::SegmentPlane(
1187  const double distance_threshold,
1188  const int ransac_n,
1189  const int num_iterations,
1190  const double probability) const {
1191  (void)distance_threshold;
1192  (void)ransac_n;
1193  (void)num_iterations;
1194  (void)probability;
1195  // Fallback: no plane found, return zeros and empty inliers.
1196  auto plane = core::Tensor::Zeros({4}, core::Float64, GetDevice());
1197     auto inliers = core::Tensor({0}, core::Int64, GetDevice());
1198  return std::make_tuple(plane, inliers);
1199 }
1200 
1201 TriangleMesh PointCloud::ComputeConvexHull(bool joggle_inputs) const {
1202  // QHull needs double dtype on the CPU.
1203  static_assert(std::is_same<realT, double>::value,
1204  "Qhull realT is not double. Update code!");
1205  using int_t = int32_t;
1206  const auto int_dtype = core::Int32;
1207  core::Tensor coordinates(
1209 
1210  orgQhull::Qhull qhull;
1211  std::string options = "Qt"; // triangulated output
1212  if (joggle_inputs) {
1213  options = "QJ"; // joggle input to avoid precision problems
1214  }
1215  qhull.runQhull("", 3, coordinates.GetLength(),
1216  coordinates.GetDataPtr<double>(), options.c_str());
1217  orgQhull::QhullFacetList facets = qhull.facetList();
1218 
1219  core::Tensor vertices({qhull.vertexCount(), 3}, core::Float64),
1220  triangles({qhull.facetCount(), 3}, int_dtype),
1221  point_indices({qhull.vertexCount()}, int_dtype);
1222  std::unordered_map<int_t, int_t> vertex_map; // pcd -> conv hull
1223  int_t tidx = 0, next_vtx = 0;
1224  auto p_vertices = vertices.GetDataPtr<double>();
1225  auto p_triangle = triangles.GetDataPtr<int_t>();
1226  auto p_point_indices = point_indices.GetDataPtr<int_t>();
1227  for (orgQhull::QhullFacetList::iterator it = facets.begin();
1228  it != facets.end(); ++it) {
1229  if (!(*it).isGood()) continue;
1230 
1231  orgQhull::QhullVertexSet vSet = it->vertices();
1232  int_t triangle_subscript = 0;
1233  for (orgQhull::QhullVertexSet::iterator vIt = vSet.begin();
1234  vIt != vSet.end(); ++vIt, ++triangle_subscript) {
1235  orgQhull::QhullPoint p = (*vIt).point();
1236  int_t vidx = p.id();
1237 
1238  auto inserted = vertex_map.insert({vidx, next_vtx});
1239  if (inserted.second) {
1240  p_triangle[triangle_subscript] = next_vtx; // hull vertex idx
1241  double *coords = p.coordinates();
1242  std::copy(coords, coords + 3, p_vertices);
1243  p_vertices += 3;
1244  p_point_indices[next_vtx++] = vidx;
1245  } else {
1246  p_triangle[triangle_subscript] =
1247  inserted.first->second; // hull vertex idx
1248  }
1249  }
1250  if ((*it).isTopOrient()) {
1251  std::swap(p_triangle[0], p_triangle[1]);
1252  }
1253  tidx++;
1254  p_triangle += 3;
1255  }
1256  if (tidx < triangles.GetShape(0)) {
1257  triangles = triangles.Slice(0, 0, tidx);
1258  }
1259  if (next_vtx != vertices.GetShape(0)) {
1260         utility::LogError(
1261                 "Qhull output has incorrect number of vertices {} instead of "
1262  "reported {}",
1263  next_vtx, vertices.GetShape(0));
1264  }
1265 
1266  TriangleMesh convex_hull(vertices, triangles);
1267  convex_hull.SetVertexAttr("point_indices", point_indices);
1268  return convex_hull.To(GetPointPositions().GetDevice());
1269 }
1270 
1271 AxisAlignedBoundingBox PointCloud::GetAxisAlignedBoundingBox() const {
1272     return AxisAlignedBoundingBox::CreateFromPoints(GetPointPositions());
1273 }
1274 
1275 OrientedBoundingBox PointCloud::GetOrientedBoundingBox() const {
1276     return OrientedBoundingBox::CreateFromPoints(GetPointPositions());
1277 }
1278 
1279 LineSet PointCloud::ExtrudeRotation(double angle,
1280                                     const core::Tensor &axis,
1281  int resolution,
1282  double translation,
1283  bool capping) const {
1284  using namespace vtkutils;
1285  return ExtrudeRotationLineSet(*this, angle, axis, resolution, translation,
1286  capping);
1287 }
1288 
1289 LineSet PointCloud::ExtrudeLinear(const core::Tensor &vector,
1290                                   double scale,
1291  bool capping) const {
1292  using namespace vtkutils;
1293  return ExtrudeLinearLineSet(*this, vector, scale, capping);
1294 }
1295 
1296 PointCloud PointCloud::Crop(const AxisAlignedBoundingBox &aabb,
1297                             bool invert) const {
1299  if (aabb.IsEmpty()) {
1300         utility::LogWarning(
1301                 "Bounding box is empty. Returning empty point cloud if "
1302  "invert is false, or the original point cloud if "
1303  "invert is true.");
1304  return invert ? Clone() : PointCloud(GetDevice());
1305  }
1306  return SelectByIndex(
1307             aabb.GetPointIndicesWithinBoundingBox(GetPointPositions()), invert);
1308 }
1309 
1310 PointCloud PointCloud::Crop(const OrientedBoundingBox &obb, bool invert) const {
1312  if (obb.IsEmpty()) {
1313         utility::LogWarning(
1314                 "Bounding box is empty. Returning empty point cloud if "
1315  "invert is false, or the original point cloud if "
1316  "invert is true.");
1317  return invert ? Clone() : PointCloud(GetDevice());
1318  }
1319  return SelectByIndex(
1320             obb.GetPointIndicesWithinBoundingBox(GetPointPositions()), invert);
1321 }
1322 
1323 int PointCloud::PCAPartition(int max_points) {
1324  int num_partitions;
1325  core::Tensor partition_ids;
1326  std::tie(num_partitions, partition_ids) =
1327             kernel::pcapartition::PCAPartition(GetPointPositions(), max_points);
1328     SetPointAttr("partition_ids", partition_ids.To(GetDevice()));
1329  return num_partitions;
1330 }
1331 
1332 core::Tensor PointCloud::ComputeMetrics(const PointCloud &pcd2,
1333                                         std::vector<Metric> metrics,
1334  MetricParameters params) const {
1335  if (IsEmpty() || pcd2.IsEmpty()) {
1336  utility::LogError("One or both input point clouds are empty!");
1337  }
1338  if (!IsCPU() || !pcd2.IsCPU()) {
1339         utility::LogWarning(
1340                 "ComputeDistance is implemented only on CPU. Computing on "
1341  "CPU.");
1342  }
1343  core::Tensor points1 = GetPointPositions().To(core::Device("CPU:0")),
1344  points2 = pcd2.GetPointPositions().To(core::Device("CPU:0"));
1345  [[maybe_unused]] core::Tensor indices12, indices21;
1346  core::Tensor sqr_distance12, sqr_distance21;
1347 
1348  core::nns::NearestNeighborSearch tree1(points1);
1349  core::nns::NearestNeighborSearch tree2(points2);
1350 
1351  if (!tree2.KnnIndex()) {
1352  utility::LogError("[ComputeDistance] Building KNN-Index failed!");
1353  }
1354  if (!tree1.KnnIndex()) {
1355  utility::LogError("[ComputeDistance] Building KNN-Index failed!");
1356  }
1357 
1358  std::tie(indices12, sqr_distance12) = tree2.KnnSearch(points1, 1);
1359  std::tie(indices21, sqr_distance21) = tree1.KnnSearch(points2, 1);
1360 
1361  return ComputeMetricsCommon(sqr_distance12.Sqrt_(), sqr_distance21.Sqrt_(),
1362  metrics, params);
1363 }
1364 
1365 bool PointCloud::IsGaussianSplat() const {
1366     bool have_all_attrs = HasPointAttr("opacity") && HasPointAttr("rot") &&
1367  HasPointAttr("scale") && HasPointAttr("f_dc");
1368  if (!have_all_attrs) { // not 3DGS, no messages.
1369  return false;
1370  }
1371  // Existing but invalid attributes cause errors.
1372  auto num_points = GetPointPositions().GetLength();
1373  core::AssertTensorShape(GetPointAttr("opacity"), {num_points, 1});
1374  core::AssertTensorShape(GetPointAttr("rot"), {num_points, 4});
1375  core::AssertTensorShape(GetPointAttr("scale"), {num_points, 3});
1376  core::AssertTensorShape(GetPointAttr("f_dc"), {num_points, 3});
1377  GaussianSplatGetSHOrder(); // Tests f_rest shape is valid.
1378  return true;
1379 }
1380 
1381 int PointCloud::GaussianSplatGetSHOrder() const {
1382     if (point_attr_.find("f_rest") == point_attr_.end()) {
1383  return 0;
1384  }
1385  const core::Tensor &f_rest = GetPointAttr("f_rest");
1386  auto num_points = GetPointPositions().GetLength();
1387  core::AssertTensorShape(f_rest, {num_points, core::None, 3});
1388  auto Nc = f_rest.GetShape(1);
1389  auto degp1 = static_cast<int>(sqrt(Nc + 1));
1390  if (degp1 * degp1 != Nc + 1) {
1391         utility::LogError(
1392                 "f_rest has incomplete Spherical Harmonics coefficients "
1393  "({}), expected 0, 3, 8 or 15.",
1394  Nc);
1395  }
1396  return degp1 - 1;
1397 }
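As a worked check of the relation used above (editor's note): a complete spherical-harmonics expansion of degree d stores (d + 1)^2 - 1 f_rest coefficients per channel, so Nc = 3, 8 and 15 correspond to SH orders 1, 2 and 3, while any other Nc (for example 7) fails the degp1 * degp1 == Nc + 1 test and raises the error.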
1398 } // namespace geometry
1399 } // namespace t
1400 } // namespace cloudViewer
Common CUDA utilities.
#define CUDA_CALL(cuda_function,...)
Definition: CUDAUtils.h:49
filament::Texture::InternalFormat format
int width
int size
bool has_normals
int height
int points
math::float4 color
cmdLineReadable * params[]
#define AssertTensorDevice(tensor,...)
Definition: TensorCheck.h:45
#define AssertTensorDtype(tensor,...)
Definition: TensorCheck.h:21
#define AssertTensorDtypes(tensor,...)
Definition: TensorCheck.h:33
#define AssertTensorShape(tensor,...)
Definition: TensorCheck.h:61
size_t stride
bool copy
Definition: VtkUtils.cpp:74
A 3D cloud and its associated features (color, normals, scalar fields, etc.)
std::vector< Eigen::Vector3d > getEigenColors() const
void addEigenNorms(const std::vector< Eigen::Vector3d > &normals)
bool hasNormals() const override
Returns whether normals are enabled or not.
bool reserveTheNormsTable()
Reserves memory to store the compressed normals.
bool reserveTheRGBTable()
Reserves memory to store the RGB colors.
bool hasColors() const override
Returns whether colors are enabled or not.
void addEigenColors(const std::vector< Eigen::Vector3d > &colors)
bool reserveThePointsTable(unsigned _numberOfPoints)
Reserves memory to store the points coordinates.
std::vector< Eigen::Vector3d > getEigenNormals() const
virtual bool hasPoints() const
Definition: GenericCloud.h:37
std::vector< Eigen::Vector3d > getEigenPoints() const
void addPoints(const std::vector< CCVector3 > &points)
std::string ToString() const
Definition: Dtype.h:65
static const Dtype UInt8
Definition: Dtype.h:30
static const Dtype Float32
Definition: Dtype.h:24
int64_t ByteSize() const
Definition: Dtype.h:59
static const Dtype Bool
Definition: Dtype.h:34
std::pair< Tensor, Tensor > Find(const Tensor &input_keys)
Definition: HashSet.cpp:38
int64_t Size() const
Get the size (number of active entries) of the hash set.
Definition: HashSet.cpp:98
std::pair< Tensor, Tensor > Insert(const Tensor &input_keys)
Definition: HashSet.cpp:32
bool IsCUDA() const
Definition: Device.h:99
bool IsCPU() const
Definition: Device.h:95
TensorKey is used to represent single index, slice or advanced indexing on a Tensor.
Definition: TensorKey.h:26
static TensorKey IndexTensor(const Tensor &index_tensor)
Definition: TensorKey.cpp:144
static TensorKey Slice(utility::optional< int64_t > start, utility::optional< int64_t > stop, utility::optional< int64_t > step)
Definition: TensorKey.cpp:138
bool AllClose(const Tensor &other, double rtol=1e-5, double atol=1e-8) const
Definition: Tensor.cpp:1895
Tensor Sqrt() const
Element-wise square root of a tensor, returns a new tensor.
Definition: Tensor.cpp:1296
Tensor Contiguous() const
Definition: Tensor.cpp:772
Tensor Matmul(const Tensor &rhs) const
Definition: Tensor.cpp:1919
Tensor Ge(const Tensor &value) const
Definition: Tensor.cpp:1618
int64_t GetLength() const
Definition: Tensor.h:1125
Dtype GetDtype() const
Definition: Tensor.h:1164
void IndexAdd_(int64_t dim, const Tensor &index, const Tensor &src)
Advanced in-place reduction by index.
Definition: Tensor.cpp:991
Tensor LogicalNot() const
Definition: Tensor.cpp:1442
Tensor Inverse() const
Definition: Tensor.cpp:1982
Tensor Sub_(const Tensor &value)
Definition: Tensor.cpp:1153
Tensor IsFinite() const
Definition: Tensor.cpp:1382
Tensor SetItem(const Tensor &value)
Set all items. Equivalent to tensor[:] = value in Python.
Definition: Tensor.cpp:564
Tensor Max(const SizeVector &dims, bool keepdim=false) const
Definition: Tensor.cpp:1275
Tensor Mul_(const Tensor &value)
Definition: Tensor.cpp:1189
Tensor All(const utility::optional< SizeVector > &dims=utility::nullopt, bool keepdim=false) const
Definition: Tensor.cpp:1770
Tensor Le(const Tensor &value) const
Definition: Tensor.cpp:1650
static Tensor Zeros(const SizeVector &shape, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor fill with zeros.
Definition: Tensor.cpp:406
Tensor Sqrt_()
Element-wise square root of a tensor, in-place.
Definition: Tensor.cpp:1302
Tensor Add_(const Tensor &value)
Definition: Tensor.cpp:1117
Device GetDevice() const override
Definition: Tensor.cpp:1435
Tensor ReinterpretCast(const core::Dtype &dtype) const
Definition: Tensor.cpp:388
Tensor Min(const SizeVector &dims, bool keepdim=false) const
Definition: Tensor.cpp:1268
Tensor Clone() const
Copy Tensor to the same device.
Definition: Tensor.h:502
Tensor Mean(const SizeVector &dims, bool keepdim=false) const
Definition: Tensor.cpp:1247
static Tensor Empty(const SizeVector &shape, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor with uninitialized values.
Definition: Tensor.cpp:400
static Tensor Ones(const SizeVector &shape, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor fill with ones.
Definition: Tensor.cpp:412
Tensor IsInf() const
Definition: Tensor.cpp:1372
Tensor IsNan() const
Definition: Tensor.cpp:1362
SizeVector GetShape() const
Definition: Tensor.h:1127
Tensor Slice(int64_t dim, int64_t start, int64_t stop, int64_t step=1) const
Definition: Tensor.cpp:857
Tensor To(Dtype dtype, bool copy=false) const
Definition: Tensor.cpp:739
Tensor Floor() const
Element-wise floor value of a tensor, returning a new tensor.
Definition: Tensor.cpp:1411
static Tensor Full(const SizeVector &shape, T fill_value, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor fill with specified value.
Definition: Tensor.h:253
A Class for nearest neighbor search.
std::tuple< Tensor, Tensor, Tensor > FixedRadiusSearch(const Tensor &query_points, double radius, bool sort=true)
bool HybridIndex(utility::optional< double > radius={})
bool FixedRadiusIndex(utility::optional< double > radius={})
std::tuple< Tensor, Tensor, Tensor > HybridSearch(const Tensor &query_points, const double radius, const int max_knn) const
std::pair< Tensor, Tensor > KnnSearch(const Tensor &query_points, int knn)
A bounding box that is aligned along the coordinate axes and defined by the min_bound and max_bound.
core::Tensor GetPointIndicesWithinBoundingBox(const core::Tensor &points) const
Indices to points that are within the bounding box.
static AxisAlignedBoundingBox CreateFromPoints(const core::Tensor &points)
core::Device GetDevice() const override
Returns the device attribute of this AxisAlignedBoundingBox.
bool IsEmpty() const override
Returns true iff the geometry is empty.
The base geometry class.
Definition: Geometry.h:23
GeometryType
Specifies possible geometry types.
Definition: Geometry.h:28
The Image class stores image with customizable rows, cols, channels, dtype and device.
Definition: Image.h:29
Image ClipTransform(float scale, float min_value, float max_value, float clip_fill=0.0f) const
Return new image after scaling and clipping image values.
Definition: Image.cpp:417
Image To(const core::Device &device, bool copy=false) const
Transfer the image to a specified device.
Definition: Image.h:132
core::Tensor AsTensor() const
Returns the underlying Tensor of the Image.
Definition: Image.h:124
int64_t GetCols() const
Get the number of columns of the image.
Definition: Image.h:87
int64_t GetRows() const
Get the number of rows of the image.
Definition: Image.h:84
A LineSet contains points and lines joining them and optionally attributes on the points and lines.
Definition: LineSet.h:85
A bounding box oriented along an arbitrary frame of reference.
bool IsEmpty() const override
Returns true iff the geometry is empty.
static OrientedBoundingBox CreateFromPoints(const core::Tensor &points, bool robust=false, MethodOBBCreate method=MethodOBBCreate::MINIMAL_APPROX)
core::Device GetDevice() const override
Returns the device attribute of this OrientedBoundingBox.
core::Tensor GetPointIndicesWithinBoundingBox(const core::Tensor &points) const
Indices to points that are within the bounding box.
A point cloud contains a list of 3D points.
Definition: PointCloud.h:82
void SetPointNormals(const core::Tensor &value)
Set the value of the "normals" attribute. Convenience function.
Definition: PointCloud.h:198
void OrientNormalsToAlignWithDirection(const core::Tensor &orientation_reference=core::Tensor::Init< float >({0, 0, 1}, core::Device("CPU:0")))
Function to orient the normals of a point cloud.
Definition: PointCloud.cpp:713
std::tuple< PointCloud, core::Tensor > RemoveNonFinitePoints(bool remove_nan=true, bool remove_infinite=true) const
Remove all points from the point cloud that have a nan entry, or infinite value. It also removes the ...
Definition: PointCloud.cpp:492
std::tuple< core::Tensor, core::Tensor > SegmentPlane(const double distance_threshold=0.01, const int ransac_n=3, const int num_iterations=100, const double probability=0.99999999) const
Segment PointCloud plane using the RANSAC algorithm. This is a wrapper for a CPU implementation and a...
core::Tensor GetCenter() const
Returns the center for point coordinates.
Definition: PointCloud.cpp:108
int GaussianSplatGetSHOrder() const
Returns the order of spherical harmonics used for Gaussian Splatting. Returns 0 if f_rest is not pres...
PointCloud & NormalizeNormals()
Normalize point normals to length 1.
Definition: PointCloud.cpp:538
PointCloud & Rotate(const core::Tensor &R, const core::Tensor &center)
Rotates the PointPositions and PointNormals (if exists).
Definition: PointCloud.cpp:206
core::Tensor & GetPointNormals()
Get the value of the "normals" attribute. Convenience function.
Definition: PointCloud.h:130
PointCloud VoxelDownSample(double voxel_size, const std::string &reduction="mean") const
Downsamples a point cloud with a specified voxel size.
Definition: PointCloud.cpp:280
PointCloud Crop(const AxisAlignedBoundingBox &aabb, bool invert=false) const
Function to crop pointcloud into output pointcloud.
PointCloud FarthestPointDownSample(const size_t num_samples, const size_t start_index=0) const
Downsample a pointcloud into output pointcloud with a set of points has farthest distance.
Definition: PointCloud.cpp:388
void SetPointColors(const core::Tensor &value)
Set the value of the "colors" attribute. Convenience function.
Definition: PointCloud.h:192
bool HasPointAttr(const std::string &key) const
Definition: PointCloud.h:207
PointCloud SelectByIndex(const core::Tensor &indices, bool invert=false, bool remove_duplicates=false) const
Select points from input pointcloud, based on indices list into output point cloud.
Definition: PointCloud.cpp:245
core::Tensor GetMaxBound() const
Returns the max bound for point coordinates.
Definition: PointCloud.cpp:104
TriangleMesh ComputeConvexHull(bool joggle_inputs=false) const
static PointCloud FromLegacy(const cloudViewer::geometry::PointCloud &pcd_legacy, core::Dtype dtype=core::Float32, const core::Device &device=core::Device("CPU:0"))
Create a PointCloud from a legacy CloudViewer PointCloud.
PointCloud RandomDownSample(double sampling_ratio) const
Downsample a pointcloud by selecting random index point and its attributes.
Definition: PointCloud.cpp:363
std::tuple< TriangleMesh, core::Tensor > HiddenPointRemoval(const core::Tensor &camera_location, double radius) const
This is an implementation of the Hidden Point Removal operator described in Katz et....
OrientedBoundingBox GetOrientedBoundingBox() const
Create an oriented bounding box from attribute "positions".
void EstimateNormals(const utility::optional< int > max_nn=30, const utility::optional< double > radius=utility::nullopt)
Function to estimate point normals. If the point cloud normals exist, the estimated normals are orien...
Definition: PointCloud.cpp:619
void SetPointPositions(const core::Tensor &value)
Set the value of the "positions" attribute. Convenience function.
Definition: PointCloud.h:186
core::Tensor GetMinBound() const
Returns the min bound for point coordinates.
Definition: PointCloud.cpp:100
std::string ToString() const
Text description.
Definition: PointCloud.cpp:74
core::Tensor & GetPointPositions()
Get the value of the "positions" attribute. Convenience function.
Definition: PointCloud.h:124
LineSet ExtrudeRotation(double angle, const core::Tensor &axis, int resolution=16, double translation=0.0, bool capping=true) const
void OrientNormalsTowardsCameraLocation(const core::Tensor &camera_location=core::Tensor::Zeros({3}, core::Float32, core::Device("CPU:0")))
Function to orient the normals of a point cloud.
Definition: PointCloud.cpp:740
std::tuple< PointCloud, core::Tensor > RemoveStatisticalOutliers(size_t nb_neighbors, double std_ratio) const
Remove points that are further away from their nb_neighbor neighbors in average. This function is not...
Definition: PointCloud.cpp:454
bool IsEmpty() const override
Returns !HasPointPositions().
Definition: PointCloud.h:257
PointCloud To(const core::Device &device, bool copy=false) const
Definition: PointCloud.cpp:112
PointCloud SelectByMask(const core::Tensor &boolean_mask, bool invert=false) const
Select points from input pointcloud, based on boolean mask indices into output point cloud.
Definition: PointCloud.cpp:219
PointCloud UniformDownSample(size_t every_k_points) const
Downsamples a point cloud by selecting every kth index point and its attributes.
Definition: PointCloud.cpp:345
AxisAlignedBoundingBox GetAxisAlignedBoundingBox() const
Create an axis-aligned bounding box from attribute "positions".
void EstimateColorGradients(const utility::optional< int > max_nn=30, const utility::optional< double > radius=utility::nullopt)
Function to compute point color gradients. If radius is provided, then HybridSearch is used,...
Definition: PointCloud.cpp:781
PointCloud & PaintUniformColor(const core::Tensor &color)
Assigns a uniform color to the point cloud.
Definition: PointCloud.cpp:558
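A sketch of painting every point one color; the include path, the vector-based Tensor constructor, and RGB values in [0, 1] are all assumptions:

// Hedged sketch: paint the whole cloud red.
#include "t/geometry/PointCloud.h"

#include <vector>

namespace tg = cloudViewer::t::geometry;

void PaintRedSketch(tg::PointCloud &pcd) {
    cloudViewer::core::Tensor red(std::vector<float>{1.0f, 0.0f, 0.0f}, {3},
                                  cloudViewer::core::Float32);
    pcd.PaintUniformColor(red);  // returns the point cloud itself, so calls can be chained
}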
cloudViewer::geometry::PointCloud ToLegacy() const
Convert to a legacy CloudViewer PointCloud.
core::Device GetDevice() const override
Returns the device attribute of this PointCloud.
Definition: PointCloud.h:421
static PointCloud CreateFromDepthImage(const Image &depth, const core::Tensor &intrinsics, const core::Tensor &extrinsics=core::Tensor::Eye(4, core::Float32, core::Device("CPU:0")), float depth_scale=1000.0f, float depth_max=3.0f, int stride=1, bool with_normals=false)
Factory function to create a point cloud from a depth image and a camera model.
Definition: PointCloud.cpp:956
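A sketch of unprojecting a depth image with the default extrinsics, depth scale, and depth clip; include paths are assumptions, and the identity intrinsics (Float64 dtype assumed) are only a placeholder for a real 3x3 camera matrix:

// Hedged sketch: depth image to point cloud.
#include "t/geometry/Image.h"
#include "t/geometry/PointCloud.h"

namespace tg = cloudViewer::t::geometry;

tg::PointCloud DepthToCloud(const tg::Image &depth) {
    // Placeholder intrinsics; substitute the real camera matrix.
    cloudViewer::core::Tensor intrinsics = cloudViewer::core::Tensor::Eye(
            3, cloudViewer::core::Float64, cloudViewer::core::Device("CPU:0"));
    // Defaults: identity extrinsics, depth_scale=1000 (millimetres to metres), depth_max=3 m.
    return tg::PointCloud::CreateFromDepthImage(depth, intrinsics);
}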
LineSet ExtrudeLinear(const core::Tensor &vector, double scale=1.0, bool capping=true) const
std::tuple< PointCloud, core::Tensor > ComputeBoundaryPoints(double radius, int max_nn=30, double angle_threshold=90.0) const
Compute the boundary points of a point cloud. The implementation is inspired by the PCL implementatio...
Definition: PointCloud.cpp:574
geometry::RGBDImage ProjectToRGBDImage(int width, int height, const core::Tensor &intrinsics, const core::Tensor &extrinsics=core::Tensor::Eye(4, core::Float32, core::Device("CPU:0")), float depth_scale=1000.0f, float depth_max=3.0f)
Project a point cloud to an RGBD image.
void RemovePointAttr(const std::string &key)
Definition: PointCloud.h:216
PointCloud & Translate(const core::Tensor &translation, bool relative=true)
Translates the PointPositions of the PointCloud.
Definition: PointCloud.cpp:182
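A sketch of translating the positions; the include path and the vector-based Tensor constructor are assumptions, and the offset dtype is assumed to match the positions:

// Hedged sketch: shift every point by a fixed offset.
#include "t/geometry/PointCloud.h"

#include <vector>

namespace tg = cloudViewer::t::geometry;

void TranslateSketch(tg::PointCloud &pcd) {
    cloudViewer::core::Tensor offset(std::vector<float>{1.0f, 0.0f, 0.0f}, {3},
                                     cloudViewer::core::Float32);
    pcd.Translate(offset);  // relative shift (the default); see the signature above for relative=false
}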
geometry::Image ProjectToDepthImage(int width, int height, const core::Tensor &intrinsics, const core::Tensor &extrinsics=core::Tensor::Eye(4, core::Float32, core::Device("CPU:0")), float depth_scale=1000.0f, float depth_max=3.0f)
Project a point cloud to a depth image.
PointCloud Clone() const
Returns copy of the point cloud on the same device.
Definition: PointCloud.cpp:123
std::tuple< PointCloud, core::Tensor > RemoveRadiusOutliers(size_t nb_points, double search_radius) const
Remove points that have less than nb_points neighbors in a sphere of a given radius.
Definition: PointCloud.cpp:426
std::tuple< PointCloud, core::Tensor > RemoveDuplicatedPoints() const
Remove duplicated points and their associated attributes.
Definition: PointCloud.cpp:516
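A sketch of dropping duplicated points; the include path is an assumption, and the second tuple element reports which points survived, per the signature above:

// Hedged sketch: deduplicate a point cloud.
#include "t/geometry/PointCloud.h"

#include <tuple>

namespace tg = cloudViewer::t::geometry;

tg::PointCloud Deduplicate(const tg::PointCloud &pcd) {
    auto result = pcd.RemoveDuplicatedPoints();
    const tg::PointCloud &deduped = std::get<0>(result);
    const cloudViewer::core::Tensor &kept = std::get<1>(result);
    (void)kept;
    return deduped;
}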
const TensorMap & GetPointAttr() const
Getter for point_attr_ TensorMap. Used in Pybind.
Definition: PointCloud.h:111
core::Tensor ComputeMetrics(const PointCloud &pcd2, std::vector< Metric > metrics={Metric::ChamferDistance}, MetricParameters params=MetricParameters()) const
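A sketch comparing two point clouds with the defaults stated in the signature above (Chamfer distance, default MetricParameters); the include path is an assumption:

// Hedged sketch: default point-cloud metrics.
#include "t/geometry/PointCloud.h"

namespace tg = cloudViewer::t::geometry;

cloudViewer::core::Tensor ChamferSketch(const tg::PointCloud &a, const tg::PointCloud &b) {
    return a.ComputeMetrics(b);  // defaults: {Metric::ChamferDistance}, MetricParameters()
}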
PointCloud & Scale(double scale, const core::Tensor &center)
Scales the PointPositions of the PointCloud.
Definition: PointCloud.cpp:196
PointCloud Append(const PointCloud &other) const
Definition: PointCloud.cpp:125
void SetPointAttr(const std::string &key, const core::Tensor &value)
Definition: PointCloud.h:177
core::Tensor ClusterDBSCAN(double eps, size_t min_points, bool print_progress=false) const
Cluster PointCloud using the DBSCAN algorithm Ester et al., "A Density-Based Algorithm for Discoverin...
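A sketch of DBSCAN clustering; the include path is an assumption, and the eps / min_points values are placeholders to be tuned per data set:

// Hedged sketch: DBSCAN clustering.
#include "t/geometry/PointCloud.h"

namespace tg = cloudViewer::t::geometry;

cloudViewer::core::Tensor ClusterSketch(const tg::PointCloud &pcd) {
    // One label per point; DBSCAN implementations typically mark noise points with -1.
    return pcd.ClusterDBSCAN(/*eps=*/0.02, /*min_points=*/10, /*print_progress=*/false);
}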
core::Tensor & GetPointColors()
Get the value of the "colors" attribute. Convenience function.
Definition: PointCloud.h:127
PointCloud(const core::Device &device=core::Device("CPU:0"))
Definition: PointCloud.cpp:49
PointCloud & Transform(const core::Tensor &transformation)
Transforms the PointPositions and PointNormals (if they exist) of the PointCloud.
Definition: PointCloud.cpp:171
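A sketch of applying a 4x4 transformation; the include path is an assumption, the identity matrix is only a placeholder for a real rigid transform, and the Float64 dtype is assumed:

// Hedged sketch: transform positions (and normals, if present).
#include "t/geometry/PointCloud.h"

namespace tg = cloudViewer::t::geometry;

void TransformSketch(tg::PointCloud &pcd) {
    cloudViewer::core::Tensor T = cloudViewer::core::Tensor::Eye(
            4, cloudViewer::core::Float64, cloudViewer::core::Device("CPU:0"));
    pcd.Transform(T);
}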
void OrientNormalsConsistentTangentPlane(size_t k, const double lambda=0.0, const double cos_alpha_tol=1.0)
Function to consistently orient estimated normals based on consistent tangent planes as described in ...
Definition: PointCloud.cpp:766
static PointCloud CreateFromRGBDImage(const RGBDImage &rgbd_image, const core::Tensor &intrinsics, const core::Tensor &extrinsics=core::Tensor::Eye(4, core::Float32, core::Device("CPU:0")), float depth_scale=1000.0f, float depth_max=3.0f, int stride=1, bool with_normals=false)
Factory function to create a point cloud from an RGB-D image and a camera model.
Definition: PointCloud.cpp:986
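A sketch of building a colored point cloud from an RGBD image with the default extrinsics, depth scale, and depth clip; include paths are assumptions, and the identity intrinsics are a placeholder for a real camera matrix:

// Hedged sketch: RGBD image to point cloud.
#include "t/geometry/PointCloud.h"
#include "t/geometry/RGBDImage.h"

namespace tg = cloudViewer::t::geometry;

tg::PointCloud RGBDToCloud(const tg::RGBDImage &rgbd) {
    // Placeholder intrinsics; substitute the real 3x3 camera matrix (Float64 dtype assumed).
    cloudViewer::core::Tensor intrinsics = cloudViewer::core::Tensor::Eye(
            3, cloudViewer::core::Float64, cloudViewer::core::Device("CPU:0"));
    return tg::PointCloud::CreateFromRGBDImage(rgbd, intrinsics);
}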
RGBDImage A pair of color and depth images.
Definition: RGBDImage.h:21
Image depth_
The depth image.
Definition: RGBDImage.h:106
Image color_
The color image.
Definition: RGBDImage.h:104
std::string GetPrimaryKey() const
Returns the primary key of the TensorMap.
Definition: TensorMap.h:159
A triangle mesh contains vertices and triangles.
Definition: TriangleMesh.h:98
TriangleMesh To(const core::Device &device, bool copy=false) const
void SetVertexAttr(const std::string &key, const core::Tensor &value)
Definition: TriangleMesh.h:254
constexpr bool has_value() const noexcept
Definition: Optional.h:440
constexpr T const & value() const &
Definition: Optional.h:465
double colors[3]
double normals[3]
#define LogWarning(...)
Definition: Logging.h:72
#define LogError(...)
Definition: Logging.h:60
#define LogDebug(...)
Definition: Logging.h:90
__device__ __forceinline__ float infinity()
Definition: result_set.h:36
__host__ __device__ float length(float2 v)
Definition: cutil_math.h:1162
int max(int a, int b)
Definition: cutil_math.h:48
double std_dev(const T &vec)
std::vector< Eigen::Vector3d > TensorToEigenVector3dVector(const core::Tensor &tensor)
Converts a tensor of shape (N, 3) to std::vector<Eigen::Vector3d>. An exception will be thrown if the...
core::Tensor EigenVector3dVectorToTensor(const std::vector< Eigen::Vector3d > &values, core::Dtype dtype, const core::Device &device)
Converts a vector of Eigen::Vector3d to a (N, 3) tensor. This function also takes care of dtype conve...
const Dtype Bool
Definition: Dtype.cpp:52
const Dtype Int64
Definition: Dtype.cpp:47
const Dtype UInt8
Definition: Dtype.cpp:48
const Dtype Float64
Definition: Dtype.cpp:43
Tensor Minimum(const Tensor &input, const Tensor &other)
Computes the element-wise minimum of input and other. The tensors must have same data type and device...
const Dtype UInt16
Definition: Dtype.cpp:49
constexpr utility::nullopt_t None
Definition: TensorKey.h:20
const Dtype Int32
Definition: Dtype.cpp:46
const Dtype Float32
Definition: Dtype.cpp:42
std::tuple< int, core::Tensor > PCAPartition(core::Tensor &points, int max_points)
void ComputeBoundaryPointsCPU(const core::Tensor &points, const core::Tensor &normals, const core::Tensor &indices, const core::Tensor &counts, core::Tensor &mask, double angle_threshold)
void OrientNormalsTowardsCameraLocationCPU(const core::Tensor &points, core::Tensor &normals, const core::Tensor &camera)
void Project(core::Tensor &depth, utility::optional< std::reference_wrapper< core::Tensor >> image_colors, const core::Tensor &points, utility::optional< std::reference_wrapper< const core::Tensor >> colors, const core::Tensor &intrinsics, const core::Tensor &extrinsics, float depth_scale, float depth_max)
Definition: PointCloud.cpp:63
void EstimateNormalsFromCovariancesCPU(const core::Tensor &covariances, core::Tensor &normals, const bool has_normals)
void EstimateColorGradientsUsingRadiusSearchCPU(const core::Tensor &points, const core::Tensor &normals, const core::Tensor &colors, core::Tensor &color_gradient, const double &radius)
void EstimateColorGradientsUsingKNNSearchCPU(const core::Tensor &points, const core::Tensor &normals, const core::Tensor &colors, core::Tensor &color_gradient, const int64_t &max_nn)
void NormalizeNormalsCPU(core::Tensor &normals)
void OrientNormalsToAlignWithDirectionCPU(core::Tensor &normals, const core::Tensor &direction)
void EstimateCovariancesUsingHybridSearchCPU(const core::Tensor &points, core::Tensor &covariances, const double &radius, const int64_t &max_nn)
void Unproject(const core::Tensor &depth, utility::optional< std::reference_wrapper< const core::Tensor >> image_colors, core::Tensor &points, utility::optional< std::reference_wrapper< core::Tensor >> colors, const core::Tensor &intrinsics, const core::Tensor &extrinsics, float depth_scale, float depth_max, int64_t stride)
Definition: PointCloud.cpp:24
void EstimateCovariancesUsingRadiusSearchCPU(const core::Tensor &points, core::Tensor &covariances, const double &radius)
void EstimateCovariancesUsingKNNSearchCPU(const core::Tensor &points, core::Tensor &covariances, const int64_t &max_nn)
void EstimateColorGradientsUsingHybridSearchCPU(const core::Tensor &points, const core::Tensor &normals, const core::Tensor &colors, core::Tensor &color_gradient, const double &radius, const int64_t &max_nn)
void TransformNormals(const core::Tensor &transformation, core::Tensor &normals)
Definition: Transform.cpp:41
void TransformPoints(const core::Tensor &transformation, core::Tensor &points)
Definition: Transform.cpp:20
void RotatePoints(const core::Tensor &R, core::Tensor &points, const core::Tensor &center)
Definition: Transform.cpp:63
void RotateNormals(const core::Tensor &R, core::Tensor &normals)
Definition: Transform.cpp:88
CLOUDVIEWER_LOCAL LineSet ExtrudeLinearLineSet(const PointCloud &pointcloud, const core::Tensor &vector, double scale, bool capping)
Definition: VtkUtils.cpp:588
CLOUDVIEWER_LOCAL LineSet ExtrudeRotationLineSet(const PointCloud &pointcloud, const double angle, const core::Tensor &axis, int resolution, double translation, bool capping)
Definition: VtkUtils.cpp:540
core::Tensor ComputeMetricsCommon(core::Tensor distance12, core::Tensor distance21, std::vector< Metric > metrics, MetricParameters params)
Definition: Metrics.cpp:16
static PointCloud CreatePointCloudWithNormals(const Image &depth_in, const Image &color_in, const core::Tensor &intrinsics_in, const core::Tensor &extrinsics, float depth_scale, float depth_max, int stride)
Definition: PointCloud.cpp:872
std::mt19937 * GetEngine()
Definition: Random.cpp:55
std::mutex * GetMutex()
Definition: Random.cpp:57
constexpr nullopt_t nullopt
Definition: Optional.h:136
Generic file read and write utility for python interface.
void swap(cloudViewer::core::SmallVectorImpl< T > &LHS, cloudViewer::core::SmallVectorImpl< T > &RHS)
Implement std::swap in terms of SmallVector swap.
Definition: SmallVector.h:1370
Holder for various parameters required by metrics.
Definition: Geometry.h:103