ACloudViewer  3.9.4
A Modern Library for 3D Data Processing
RegistrationTools.cpp
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // - CloudViewer: www.cloudViewer.org -
3 // ----------------------------------------------------------------------------
4 // Copyright (c) 2018-2024 www.cloudViewer.org
5 // SPDX-License-Identifier: MIT
6 // ----------------------------------------------------------------------------
7 
8 #include <RegistrationTools.h>
9 
10 // local
11 #include <CVKdTree.h>
12 #include <CVMath.h>
13 #include <CVPointCloud.h>
14 #include <CloudSamplingTools.h>
16 #include <Garbage.h>
19 #include <Jacobi.h>
21 #include <NormalDistribution.h>
22 #include <ParallelSort.h>
23 #include <ReferenceCloud.h>
24 #include <ScalarFieldTools.h>
25 
26 // system
27 #include <ctime>
28 
29 using namespace cloudViewer;
30 
// NOTE(review): the opening declaration line of this function is missing from
// this extract; from the parameter list and the file name
// (RegistrationTools.cpp) this is presumably
// RegistrationTools::FilterTransformation — confirm against the header.
// Purpose: constrain 'inTrans' according to the SKIP_* bit flags in 'filters'
// (keep only one rotation axis, and/or zero out translation components) and
// store the result in 'outTrans'. 'outTrans' may safely alias 'inTrans'
// (see the defensive copy of R below).
32  const ScaledTransformation& inTrans,
33  int filters,
34  const CCVector3& toBeAlignedGravityCenter,
35  const CCVector3& referenceGravityCenter,
36  ScaledTransformation& outTrans) {
37  outTrans = inTrans;
38 
39  // filter rotation
40  int rotationFilter = (filters & SKIP_ROTATION);
41  if (inTrans.R.isValid() && (rotationFilter != 0)) {
42  const SquareMatrix R(inTrans.R); // copy it in case inTrans and
43  // outTrans are the same!
44  outTrans.R.toIdentity();
45  if (rotationFilter ==
46  SKIP_RYZ) // keep only the rotation component around X
47  {
48  // we use a specific Euler angles convention here
// Degenerate-angle guard: when R(0,2) reaches 1 (asin argument at its
// bound, cos_phi -> 0) the X component cannot be isolated with this
// convention, so the identity rotation set above is kept instead.
49  if (R.getValue(0, 2) < 1.0) {
50  PointCoordinateType phi = -asin(R.getValue(0, 2));
51  PointCoordinateType cos_phi = cos(phi);
52  PointCoordinateType theta = atan2(R.getValue(1, 2) / cos_phi,
53  R.getValue(2, 2) / cos_phi);
54  PointCoordinateType cos_theta = cos(theta);
55  PointCoordinateType sin_theta = sin(theta);
56 
// Rebuild a pure rotation about X from the extracted angle theta.
57  outTrans.R.setValue(1, 1, cos_theta);
58  outTrans.R.setValue(2, 2, cos_theta);
59  outTrans.R.setValue(2, 1, sin_theta);
60  outTrans.R.setValue(1, 2, -sin_theta);
61  } else {
62  // simpler/faster to ignore this (very) specific case!
63  }
64  } else if (rotationFilter ==
65  SKIP_RXZ) // keep only the rotation component around Y
66  {
67  // we use a specific Euler angles convention here
68  if (R.getValue(2, 1) < 1.0) {
69  PointCoordinateType theta = asin(R.getValue(2, 1));
70  PointCoordinateType cos_theta = cos(theta);
71  PointCoordinateType phi = atan2(-R.getValue(2, 0) / cos_theta,
72  R.getValue(2, 2) / cos_theta);
73  PointCoordinateType cos_phi = cos(phi);
74  PointCoordinateType sin_phi = sin(phi);
75 
// Rebuild a pure rotation about Y from the extracted angle phi.
76  outTrans.R.setValue(0, 0, cos_phi);
77  outTrans.R.setValue(2, 2, cos_phi);
78  outTrans.R.setValue(0, 2, sin_phi);
79  outTrans.R.setValue(2, 0, -sin_phi);
80  } else {
81  // simpler/faster to ignore this (very) specific case!
82  }
83  } else if (rotationFilter ==
84  SKIP_RXY) // keep only the rotation component around Z
85  {
86  // we use a specific Euler angles convention here
87  if (R.getValue(2, 0) < 1.0) {
88  PointCoordinateType theta_rad = -asin(R.getValue(2, 0));
89  PointCoordinateType cos_theta = cos(theta_rad);
90  PointCoordinateType phi_rad =
91  atan2(R.getValue(1, 0) / cos_theta,
92  R.getValue(0, 0) / cos_theta);
93  PointCoordinateType cos_phi = cos(phi_rad);
94  PointCoordinateType sin_phi = sin(phi_rad);
95 
// Rebuild a pure rotation about Z from the extracted angle phi_rad.
96  outTrans.R.setValue(0, 0, cos_phi);
97  outTrans.R.setValue(1, 1, cos_phi);
98  outTrans.R.setValue(1, 0, sin_phi);
99  outTrans.R.setValue(0, 1, -sin_phi);
100  } else {
101  // simpler/faster to ignore this (very) specific case!
102  }
103  } else {
104  // we ignore all rotation components
105  }
106 
107  // fix the translation to compensate for the change of the rotation
108  // matrix
// Re-anchor the translation so the to-be-aligned gravity center still maps
// onto the reference gravity center after the rotation has been altered.
109  CCVector3d alignedGravityCenter =
110  outTrans.apply(toBeAlignedGravityCenter.toDouble());
111  outTrans.T +=
112  (referenceGravityCenter.toDouble() - alignedGravityCenter);
113  }
114 
115  // filter translation
// Translation filtering is per-axis: selected components are zeroed.
116  if (filters & SKIP_TRANSLATION) {
117  if (filters & SKIP_TX) outTrans.T.x = 0;
118  if (filters & SKIP_TY) outTrans.T.y = 0;
119  if (filters & SKIP_TZ) outTrans.T.z = 0;
120  }
121 }
122 
// Pairs the model (reference) entity's cloud with its optional per-point
// weights. Copy semantics are shallow: only the two pointers are copied —
// this struct owns nothing.
// NOTE(review): the member declaration lines (original lines 126-127,
// presumably 'cloud' and 'weights' per the initializer lists) are missing
// from this extract — confirm against the original file.
123 struct ModelCloud {
124  ModelCloud() : cloud(nullptr), weights(nullptr) {}
125  ModelCloud(const ModelCloud& m) : cloud(m.cloud), weights(m.weights) {}
128 };
129 
// Working set for the moving ("data") side of the ICP loop: the (possibly
// resampled) data cloud, a rotated copy, optional per-point weights, and the
// Closest Point Set in one of two forms (reference-cloud based 'CPSetRef' or
// plain point cloud 'CPSetPlain'). All pointers are non-owning.
// NOTE(review): the constructor's name line (original line 131) and the
// member declaration lines (original lines 138-142) are missing from this
// extract; member names are inferred from the initializer list below.
130 struct DataCloud {
// Default-construct with every pointer null — nothing is associated yet.
132  : cloud(nullptr),
133  rotatedCloud(nullptr),
134  weights(nullptr),
135  CPSetRef(nullptr),
136  CPSetPlain(nullptr) {}
137 
143 };
144 
146  GenericIndexedCloudPersist* inputModelCloud,
147  GenericIndexedMesh* inputModelMesh,
148  GenericIndexedCloudPersist* inputDataCloud,
149  const Parameters& params,
150  ScaledTransformation& transform,
151  double& finalRMS,
152  unsigned& finalPointCount,
153  GenericProgressCallback* progressCb /*=0*/) {
154  if (!inputModelCloud || !inputDataCloud) {
155  assert(false);
157  }
158 
159  // hopefully the user will understand it's not possible ;)
160  finalRMS = -1.0;
161 
163  Garbage<ScalarField> sfGarbage;
164 
165  // DATA CLOUD (will move)
166  DataCloud data;
167  {
168  // we also want to use the same number of points for registration as
169  // initially defined by the user!
170  unsigned dataSamplingLimit =
171  params.finalOverlapRatio != 1.0
172  ? static_cast<unsigned>(params.samplingLimit /
173  params.finalOverlapRatio)
174  : params.samplingLimit;
175 
176  // we resample the cloud if it's too big (speed increase)
177  if (inputDataCloud->size() > dataSamplingLimit) {
179  inputDataCloud, dataSamplingLimit);
180  if (!data.cloud) {
182  }
183  cloudGarbage.add(data.cloud);
184 
185  // if we need to resample the weights as well
186  if (params.dataWeights) {
187  data.weights = new ScalarField("ResampledDataWeights");
188  sfGarbage.add(data.weights);
189 
190  unsigned destCount = data.cloud->size();
191  if (data.weights->resizeSafe(destCount)) {
192  for (unsigned i = 0; i < destCount; ++i) {
193  unsigned pointIndex =
194  data.cloud->getPointGlobalIndex(i);
195  data.weights->setValue(
196  i, params.dataWeights->getValue(pointIndex));
197  }
198  data.weights->computeMinAndMax();
199  } else {
200  // not enough memory
202  }
203  }
204  } else // no need to resample
205  {
206  // we still create a 'fake' reference cloud with all the points
207  data.cloud = new ReferenceCloud(inputDataCloud);
208  cloudGarbage.add(data.cloud);
209  if (!data.cloud->addPointIndex(0, inputDataCloud->size())) {
210  // not enough memory
212  }
213  // we use the input weights
214  data.weights = params.dataWeights;
215  }
216 
217  // eventually we'll need a scalar field on the data cloud
218  if (!data.cloud->enableScalarField()) {
219  // not enough memory
221  }
222  }
223  assert(data.cloud);
224 
225  // octree level for cloud/mesh distances computation
226  unsigned char meshDistOctreeLevel = 8;
227 
228  // MODEL ENTITY (reference, won't move)
229  ModelCloud model;
230  if (inputModelMesh) {
231  assert(!params.modelWeights);
232 
233  // we'll use the mesh vertices to estimate the right octree level
234  DgmOctree dataOctree(data.cloud);
235  DgmOctree modelOctree(inputModelCloud);
236  if (dataOctree.build() < static_cast<int>(data.cloud->size()) ||
237  modelOctree.build() < static_cast<int>(inputModelCloud->size())) {
238  // an error occurred during the octree computation: probably there's
239  // not enough memory
241  }
242 
243  meshDistOctreeLevel =
244  dataOctree.findBestLevelForComparisonWithOctree(&modelOctree);
245  } else /*if (inputModelCloud)*/
246  {
247  // we resample the cloud if it's too big (speed increase)
248  if (inputModelCloud->size() > params.samplingLimit) {
249  ReferenceCloud* subModelCloud =
251  inputModelCloud, params.samplingLimit);
252  if (!subModelCloud) {
253  // not enough memory
255  }
256  cloudGarbage.add(subModelCloud);
257 
258  // if we need to resample the weights as well
259  if (params.modelWeights) {
260  model.weights = new ScalarField("ResampledModelWeights");
261  sfGarbage.add(model.weights);
262 
263  unsigned destCount = subModelCloud->size();
264  if (model.weights->resizeSafe(destCount)) {
265  for (unsigned i = 0; i < destCount; ++i) {
266  unsigned pointIndex =
267  subModelCloud->getPointGlobalIndex(i);
268  model.weights->setValue(
269  i, params.modelWeights->getValue(pointIndex));
270  }
271  model.weights->computeMinAndMax();
272  } else {
273  // not enough memory
275  }
276  }
277  model.cloud = subModelCloud;
278  } else {
279  // we use the input cloud and weights
280  model.cloud = inputModelCloud;
281  model.weights = params.modelWeights;
282  }
283  assert(model.cloud);
284  }
285 
286  // for partial overlap
287  unsigned maxOverlapCount = 0;
288  std::vector<ScalarType> overlapDistances;
289  if (params.finalOverlapRatio < 1.0) {
290  // we pre-allocate the memory to sort distance values later
291  try {
292  overlapDistances.resize(data.cloud->size());
293  } catch (const std::bad_alloc&) {
294  // not enough memory
296  }
297  maxOverlapCount = static_cast<unsigned>(params.finalOverlapRatio *
298  data.cloud->size());
299  assert(maxOverlapCount != 0);
300  }
301 
302  // Closest Point Set (see ICP algorithm)
303  if (inputModelMesh) {
304  data.CPSetPlain = new PointCloud;
305  cloudGarbage.add(data.CPSetPlain);
306  } else {
307  data.CPSetRef = new ReferenceCloud(model.cloud);
308  cloudGarbage.add(data.CPSetRef);
309  }
310 
311  // per-point couple weights
312  ScalarField* coupleWeights = nullptr;
313  if (model.weights || data.weights) {
314  coupleWeights = new ScalarField("CoupleWeights");
315  sfGarbage.add(coupleWeights);
316  }
317 
318  // we compute the initial distance between the two clouds (and the CPSet by
319  // the way) data.cloud->forEach(ScalarFieldTools::SetScalarValueToNaN);
320  // //DGM: done automatically in computeCloud2CloudDistances now
321  if (inputModelMesh) {
322  assert(data.CPSetPlain);
324  c2mDistParams;
325  c2mDistParams.octreeLevel = meshDistOctreeLevel;
326  c2mDistParams.CPSet = data.CPSetPlain;
327  c2mDistParams.maxThreadCount = params.maxThreadCount;
329  data.cloud, inputModelMesh, c2mDistParams, progressCb) <
330  0) {
331  // an error occurred during distances computation...
333  }
334  } else if (inputModelCloud) {
335  assert(data.CPSetRef);
337  c2cDistParams;
338  c2cDistParams.CPSet = data.CPSetRef;
339  c2cDistParams.maxThreadCount = params.maxThreadCount;
341  data.cloud, model.cloud, c2cDistParams, progressCb) < 0) {
342  // an error occurred during distances computation...
344  }
345  } else {
346  assert(false);
347  }
348 
349  FILE* fTraceFile = nullptr;
350 #ifdef CV_DEBUG
351  fTraceFile = fopen("registration_trace_log.csv", "wt");
352  if (fTraceFile) fprintf(fTraceFile, "Iteration; RMS; Point count;\n");
353 #endif
354 
355  double lastStepRMS = -1.0, initialDeltaRMS = -1.0;
356  ScaledTransformation currentTrans;
358 
359  for (unsigned iteration = 0;; ++iteration) {
360  if (progressCb && progressCb->isCancelRequested()) {
362  break;
363  }
364 
365  // shall we remove the farthest points?
366  bool pointOrderHasBeenChanged = false;
367  if (params.filterOutFarthestPoints) {
369  N.computeParameters(data.cloud);
370  if (N.isValid()) {
371  ScalarType mu, sigma2;
372  N.getParameters(mu, sigma2);
373  ScalarType maxDistance =
374  static_cast<ScalarType>(mu + 2.5 * sqrt(sigma2));
375 
376  DataCloud filteredData;
377  filteredData.cloud =
378  new ReferenceCloud(data.cloud->getAssociatedCloud());
379  cloudGarbage.add(filteredData.cloud);
380 
381  if (data.CPSetRef) {
382  filteredData.CPSetRef = new ReferenceCloud(
383  data.CPSetRef
384  ->getAssociatedCloud()); // we must also
385  // update the
386  // CPSet!
387  cloudGarbage.add(filteredData.CPSetRef);
388  } else if (data.CPSetPlain) {
389  filteredData.CPSetPlain =
390  new PointCloud; // we must also update the CPSet!
391  cloudGarbage.add(filteredData.CPSetPlain);
392  }
393 
394  if (data.weights) {
395  filteredData.weights =
396  new ScalarField("ResampledDataWeights");
397  sfGarbage.add(filteredData.weights);
398  }
399 
400  unsigned pointCount = data.cloud->size();
401  if (!filteredData.cloud->reserve(pointCount) ||
402  (filteredData.CPSetRef &&
403  !filteredData.CPSetRef->reserve(pointCount)) ||
404  (filteredData.CPSetPlain &&
405  !filteredData.CPSetPlain->reserve(pointCount)) ||
406  (filteredData.weights &&
407  !filteredData.weights->reserveSafe(pointCount))) {
408  // not enough memory
410  break;
411  }
412 
413  // we keep only the points with "not too high" distances
414  for (unsigned i = 0; i < pointCount; ++i) {
415  if (data.cloud->getPointScalarValue(i) <= maxDistance) {
416  filteredData.cloud->addPointIndex(
417  data.cloud->getPointGlobalIndex(i));
418  if (filteredData.CPSetRef)
419  filteredData.CPSetRef->addPointIndex(
420  data.CPSetRef->getPointGlobalIndex(i));
421  else if (filteredData.CPSetPlain)
422  filteredData.CPSetPlain->addPoint(
423  *(data.CPSetPlain->getPoint(i)));
424  if (filteredData.weights)
425  filteredData.weights->addElement(
426  data.weights->getValue(i));
427  }
428  }
429 
430  // resize should be ok as we have called reserve first
431  filteredData.cloud->resize(
432  filteredData.cloud
433  ->size()); // should always be ok as current
434  // size < pointCount
435  if (filteredData.CPSetRef)
436  filteredData.CPSetRef->resize(
437  filteredData.CPSetRef->size());
438  else if (filteredData.CPSetPlain)
439  filteredData.CPSetPlain->resize(
440  filteredData.CPSetPlain->size());
441  if (filteredData.weights)
442  filteredData.weights->resize(
443  filteredData.weights->currentSize());
444 
445  // replace old structures by new ones
446  cloudGarbage.destroy(data.cloud);
447  if (data.CPSetRef)
448  cloudGarbage.destroy(data.CPSetRef);
449  else if (data.CPSetPlain)
450  cloudGarbage.destroy(data.CPSetPlain);
451  if (data.weights) sfGarbage.destroy(data.weights);
452  data = filteredData;
453 
454  pointOrderHasBeenChanged = true;
455  }
456  }
457 
458  // shall we ignore/remove some points based on their distance?
459  DataCloud trueData;
460  unsigned pointCount = data.cloud->size();
461  if (maxOverlapCount != 0 && pointCount > maxOverlapCount) {
462  assert(overlapDistances.size() >= pointCount);
463  for (unsigned i = 0; i < pointCount; ++i) {
464  overlapDistances[i] = data.cloud->getPointScalarValue(i);
465  assert(overlapDistances[i] == overlapDistances[i]);
466  }
467 
468  ParallelSort(overlapDistances.begin(),
469  overlapDistances.begin() + pointCount);
470 
471  assert(maxOverlapCount != 0);
472  ScalarType maxOverlapDist = overlapDistances[maxOverlapCount - 1];
473 
474  DataCloud filteredData;
475  filteredData.cloud =
476  new ReferenceCloud(data.cloud->getAssociatedCloud());
477  if (data.CPSetRef) {
478  filteredData.CPSetRef = new ReferenceCloud(
479  data.CPSetRef
480  ->getAssociatedCloud()); // we must also update
481  // the CPSet!
482  cloudGarbage.add(filteredData.CPSetRef);
483  } else if (data.CPSetPlain) {
484  filteredData.CPSetPlain =
485  new PointCloud; // we must also update the CPSet!
486  cloudGarbage.add(filteredData.CPSetPlain);
487  }
488  cloudGarbage.add(filteredData.cloud);
489  if (data.weights) {
490  filteredData.weights = new ScalarField("ResampledDataWeights");
491  sfGarbage.add(filteredData.weights);
492  }
493 
494  if (!filteredData.cloud->reserve(
495  pointCount) // should be maxOverlapCount in theory, but
496  // there may be several points with the
497  // same value as maxOverlapDist!
498  || (filteredData.CPSetRef &&
499  !filteredData.CPSetRef->reserve(pointCount)) ||
500  (filteredData.CPSetPlain &&
501  !filteredData.CPSetPlain->reserve(pointCount)) ||
502  (filteredData.weights &&
503  !filteredData.weights->reserveSafe(pointCount))) {
504  // not enough memory
506  break;
507  }
508 
509  // we keep only the points with "not too high" distances
510  for (unsigned i = 0; i < pointCount; ++i) {
511  if (data.cloud->getPointScalarValue(i) <= maxOverlapDist) {
512  filteredData.cloud->addPointIndex(
513  data.cloud->getPointGlobalIndex(i));
514  if (filteredData.CPSetRef)
515  filteredData.CPSetRef->addPointIndex(
516  data.CPSetRef->getPointGlobalIndex(i));
517  else if (filteredData.CPSetPlain)
518  filteredData.CPSetPlain->addPoint(
519  *(data.CPSetPlain->getPoint(i)));
520  if (filteredData.weights)
521  filteredData.weights->addElement(
522  data.weights->getValue(i));
523  }
524  }
525  assert(filteredData.cloud->size() >= maxOverlapCount);
526 
527  // resize should be ok as we have called reserve first
528  filteredData.cloud->resize(
529  filteredData.cloud->size()); // should always be ok as
530  // current size < pointCount
531  if (filteredData.CPSetRef)
532  filteredData.CPSetRef->resize(filteredData.CPSetRef->size());
533  else if (filteredData.CPSetPlain)
534  filteredData.CPSetPlain->resize(
535  filteredData.CPSetPlain->size());
536  if (filteredData.weights)
537  filteredData.weights->resize(
538  filteredData.weights->currentSize());
539 
540  //(temporarily) replace old structures by new ones
541  trueData = data;
542  data = filteredData;
543  }
544 
545  // update couple weights (if any)
546  if (coupleWeights) {
547  assert(model.weights || data.weights);
548  unsigned count = data.cloud->size();
549  assert(!model.weights ||
550  (data.CPSetRef && data.CPSetRef->size() == count));
551 
552  if (coupleWeights->currentSize() != count &&
553  !coupleWeights->resizeSafe(count)) {
554  // not enough memory to store weights
556  break;
557  }
558  for (unsigned i = 0; i < count; ++i) {
559  ScalarType wd = (data.weights ? data.weights->getValue(i)
560  : static_cast<ScalarType>(1.0));
561  ScalarType wm =
562  (model.weights
563  ? model.weights->getValue(
564  data.CPSetRef->getPointGlobalIndex(
565  i))
566  : static_cast<ScalarType>(
567  1.0)); // model weights are only
568  // support with a reference
569  // cloud!
570  coupleWeights->setValue(i, wd * wm);
571  }
572  coupleWeights->computeMinAndMax();
573  }
574 
575  // we can now compute the best registration transformation for this step
576  //(now that we have selected the points that will be used for
577  // registration!)
578  {
579  // if we use weights, we have to compute weighted RMS!!!
580  double meanSquareValue = 0.0;
581  double wiSum = 0.0; // we normalize the weights by their sum
582 
583  for (unsigned i = 0; i < data.cloud->size(); ++i) {
584  ScalarType V = data.cloud->getPointScalarValue(i);
585  if (ScalarField::ValidValue(V)) {
586  double wi = 1.0;
587  if (coupleWeights) {
588  ScalarType w = coupleWeights->getValue(i);
589  if (!ScalarField::ValidValue(w)) continue;
590  wi = std::abs(w);
591  }
592  double Vd = wi * V;
593  wiSum += wi * wi;
594  meanSquareValue += Vd * Vd;
595  }
596  }
597 
598  // 12/11/2008 - A.BEY: ICP guarantees only the decrease of the
599  // squared distances sum (not the distances sum)
600  double meanSquareError =
601  (wiSum != 0
602  ? static_cast<ScalarType>(meanSquareValue / wiSum)
603  : 0);
604 
605  double rms = sqrt(meanSquareError);
606 
607 #ifdef CV_DEBUG
608  if (fTraceFile)
609  fprintf(fTraceFile, "%u; %f; %u;\n", iteration, rms,
610  data.cloud->size());
611 #endif
612  if (iteration == 0) {
613  // progress notification
614  if (progressCb) {
615  // on the first iteration, we init/show the dialog
616  if (progressCb->textCanBeEdited()) {
617  progressCb->setMethodTitle("Clouds registration");
618  char buffer[256];
619  sprintf(buffer, "Initial RMS = %f\n", rms);
620  progressCb->setInfo(buffer);
621  }
622  progressCb->update(0);
623  progressCb->start();
624  }
625 
626  finalRMS = rms;
627  finalPointCount = data.cloud->size();
628 
629  if (LessThanEpsilon(rms)) {
630  // nothing to do
632  break;
633  }
634  } else {
635  assert(lastStepRMS >= 0.0);
636 
637  if (rms > lastStepRMS) // error increase!
638  {
639  result = iteration == 1 ? ICP_NOTHING_TO_DO
641  break;
642  }
643 
644  // error update (RMS)
645  double deltaRMS = lastStepRMS - rms;
646  // should be better!
647  assert(deltaRMS >= 0.0);
648 
649  // we update the global transformation matrix
650  if (currentTrans.R.isValid()) {
651  if (transform.R.isValid())
652  transform.R = currentTrans.R * transform.R;
653  else
654  transform.R = currentTrans.R;
655 
656  transform.T = currentTrans.R * transform.T;
657  }
658 
659  if (params.adjustScale) {
660  transform.s *= currentTrans.s;
661  transform.T *= currentTrans.s;
662  }
663 
664  transform.T += currentTrans.T;
665 
666  finalRMS = rms;
667  finalPointCount = data.cloud->size();
668 
669  // stop criterion
670  if ((params.convType == MAX_ERROR_CONVERGENCE &&
671  deltaRMS < params.minRMSDecrease) // convergence reached
672  ||
673  (params.convType == MAX_ITER_CONVERGENCE &&
674  iteration >=
675  params.nbMaxIterations) // max iteration reached
676  ) {
678  break;
679  }
680 
681  // progress notification
682  if (progressCb) {
683  if (progressCb->textCanBeEdited()) {
684  char buffer[256];
685  sprintf(buffer, "RMS = %f [-%f]\n", rms, deltaRMS);
686  progressCb->setInfo(buffer);
687  }
688  if (iteration == 1) {
689  initialDeltaRMS = deltaRMS;
690  progressCb->update(0);
691  } else {
692  assert(initialDeltaRMS >= 0.0);
693  float progressPercent = static_cast<float>(
694  (initialDeltaRMS - deltaRMS) /
695  (initialDeltaRMS - params.minRMSDecrease) *
696  100.0);
697  progressCb->update(progressPercent);
698  }
699  }
700  }
701 
702  lastStepRMS = rms;
703  }
704 
705  // single iteration of the registration procedure
706  currentTrans = ScaledTransformation();
707  CCVector3 dataGravityCenter, modelGravityCenter;
709  data.cloud,
710  data.CPSetRef ? static_cast<cloudViewer::GenericCloud*>(
711  data.CPSetRef)
712  : static_cast<cloudViewer::GenericCloud*>(
713  data.CPSetPlain),
714  currentTrans, params.adjustScale, coupleWeights, PC_ONE,
715  &dataGravityCenter, &modelGravityCenter)) {
717  break;
718  }
719 
720  // restore original data sets (if any were stored)
721  if (trueData.cloud) {
722  cloudGarbage.destroy(data.cloud);
723  if (data.CPSetRef)
724  cloudGarbage.destroy(data.CPSetRef);
725  else if (data.CPSetPlain)
726  cloudGarbage.destroy(data.CPSetPlain);
727  if (data.weights) sfGarbage.destroy(data.weights);
728  data = trueData;
729  }
730 
731  // shall we filter some components of the resulting transformation?
732  if (params.transformationFilters != SKIP_NONE) {
733  // filter translation (in place)
734  FilterTransformation(currentTrans, params.transformationFilters,
735  dataGravityCenter, modelGravityCenter,
736  currentTrans);
737  }
738 
739  // get rotated data cloud
740  if (!data.rotatedCloud || pointOrderHasBeenChanged) {
741  // we create a new structure, with rotated points
742  PointCloud* rotatedDataCloud =
744  currentTrans);
745  if (!rotatedDataCloud) {
746  // not enough memory
748  break;
749  }
750  // replace data.rotatedCloud
751  if (data.rotatedCloud) cloudGarbage.destroy(data.rotatedCloud);
752  data.rotatedCloud = rotatedDataCloud;
753  cloudGarbage.add(data.rotatedCloud);
754 
755  // update data.cloud
756  data.cloud->clear();
757  data.cloud->setAssociatedCloud(data.rotatedCloud);
758  if (!data.cloud->addPointIndex(0, data.rotatedCloud->size())) {
759  // not enough memory
761  break;
762  }
763  } else {
764  // we simply have to rotate the existing temporary cloud
765  currentTrans.apply(*data.rotatedCloud);
766  data.rotatedCloud->invalidateBoundingBox(); // invalidate bb
767 
768  // DGM: warning, we must manually invalidate the ReferenceCloud bbox
769  // after rotation!
770  data.cloud->invalidateBoundingBox();
771  }
772 
773  // compute (new) distances to model
774  if (inputModelMesh) {
776  c2mDistParams;
777  c2mDistParams.octreeLevel = meshDistOctreeLevel;
778  c2mDistParams.CPSet = data.CPSetPlain;
779  c2mDistParams.maxThreadCount = params.maxThreadCount;
781  data.cloud, inputModelMesh, c2mDistParams) < 0) {
782  // an error occurred during distances computation...
784  break;
785  }
786  } else if (inputDataCloud) {
788  c2cDistParams;
789  c2cDistParams.CPSet = data.CPSetRef;
790  c2cDistParams.maxThreadCount = params.maxThreadCount;
792  data.cloud, model.cloud, c2cDistParams) < 0) {
793  // an error occurred during distances computation...
795  break;
796  }
797  } else {
798  assert(false);
799  }
800  }
801 
802  // end of tracefile
803  if (fTraceFile) {
804  fclose(fTraceFile);
805  fTraceFile = nullptr;
806  }
807 
808  // end of progress notification
809  if (progressCb) {
810  progressCb->stop();
811  }
812 
813  return result;
814 }
815 
// NOTE(review): the declaration line (original line 816) is missing from
// this extract — confirm the exact function name against the header.
// Thin adapter: forwards straight to RegistrationProcedure(), translating
// the caller-facing 'fixedScale' flag into RegistrationProcedure's opposite
// 'adjustScale' parameter (hence the negation).
817  GenericCloud* lCloud,
818  GenericCloud* rCloud,
819  ScaledTransformation& trans,
820  bool fixedScale /*=false*/) {
821  return RegistrationProcedure(lCloud, rCloud, trans, !fixedScale);
822 }
823 
// NOTE(review): the declaration line (original line 824) is missing from
// this extract; given the body this is presumably a ComputeRMS-style
// function taking (lCloud, rCloud, trans).
// Computes the root-mean-square distance between the points of rCloud and
// the points of lCloud after applying 'trans' to each lCloud point
// (clouds are matched pairwise by iteration order).
825  GenericCloud* rCloud,
826  const ScaledTransformation& trans) {
827  assert(rCloud && lCloud);
// Guard: both clouds must exist, have equal sizes, and at least 3 points.
// NOTE(review): 'return false' on the failure path of a function whose
// success path returns sqrt(...) suggests a double-returning function
// yielding 0.0 on failure — an ambiguous sentinel, since 0 is also a valid
// RMS. Confirm the intended error convention against the declaration.
828  if (!rCloud || !lCloud || rCloud->size() != lCloud->size() ||
829  rCloud->size() < 3)
830  return false;
831 
// 'rms' accumulates the SUM of squared residuals; the mean and square root
// are only applied at the return statement.
832  double rms = 0.0;
833 
834  rCloud->placeIteratorAtBeginning();
835  lCloud->placeIteratorAtBeginning();
836  unsigned count = rCloud->size();
837 
838  for (unsigned i = 0; i < count; i++) {
839  const CCVector3* Ri = rCloud->getNextPoint();
840  const CCVector3* Li = lCloud->getNextPoint();
841  CCVector3 Lit = trans.apply(*Li);
842 
// squared distance between the reference point and the transformed point
843  rms += (*Ri - Lit).norm2();
844  }
845 
846  return sqrt(rms / count);
847 }
848 
850  GenericCloud* P, // data
851  GenericCloud* X, // model
852  ScaledTransformation& trans,
853  bool adjustScale /*=false*/,
854  ScalarField* coupleWeights /*=0*/,
855  PointCoordinateType aPrioriScale /*=1.0f*/,
856  CCVector3* _Gp /*=nullptr*/,
857  CCVector3* _Gx /*=nullptr*/) {
858  // resulting transformation (R is invalid on initialization, T is (0,0,0)
859  // and s==1)
860  trans.R.invalidate();
861  trans.T = CCVector3(0, 0, 0);
862  trans.s = PC_ONE;
863 
864  if (P == nullptr || X == nullptr || P->size() != X->size() || P->size() < 3)
865  return false;
866 
867  // centers of mass
868  CCVector3 Gp =
869  coupleWeights
871  P, coupleWeights)
873  CCVector3 Gx =
874  coupleWeights
876  X, coupleWeights)
878 
879  // specific case: 3 points only
880  // See section 5.A in Horn's paper
881  if (P->size() == 3) {
882  // compute the first set normal
884  const CCVector3* Ap = P->getNextPoint();
885  const CCVector3* Bp = P->getNextPoint();
886  const CCVector3* Cp = P->getNextPoint();
887  CCVector3 Np(0, 0, 1);
888  {
889  Np = (*Bp - *Ap).cross(*Cp - *Ap);
890  double norm = Np.normd();
891  if (LessThanEpsilon(norm)) {
892  return false;
893  }
894  Np /= static_cast<PointCoordinateType>(norm);
895  }
896  // compute the second set normal
897  X->placeIteratorAtBeginning();
898  const CCVector3* Ax = X->getNextPoint();
899  const CCVector3* Bx = X->getNextPoint();
900  const CCVector3* Cx = X->getNextPoint();
901  CCVector3 Nx(0, 0, 1);
902  {
903  Nx = (*Bx - *Ax).cross(*Cx - *Ax);
904  double norm = Nx.normd();
905  if (LessThanEpsilon(norm)) {
906  return false;
907  }
908  Nx /= static_cast<PointCoordinateType>(norm);
909  }
910  // now the rotation is simply the rotation from Nx to Np, centered on Gx
911  CCVector3 a = Np.cross(Nx);
912  if (LessThanEpsilon(a.norm())) {
913  trans.R = SquareMatrix(3);
914  trans.R.toIdentity();
915  if (Np.dot(Nx) < 0) {
916  trans.R.scale(-PC_ONE);
917  }
918  } else {
919  double cos_t = Np.dot(Nx);
920  assert(cos_t > -1.0 && cos_t < 1.0); // see above
921  double s = sqrt((1 + cos_t) * 2);
922  double q[4] = {
923  s / 2, a.x / s, a.y / s,
924  a.z / s}; // don't forget to normalize the quaternion
925  double qnorm =
926  q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3];
927  assert(qnorm >= ZERO_TOLERANCE_D);
928  qnorm = sqrt(qnorm);
929  q[0] /= qnorm;
930  q[1] /= qnorm;
931  q[2] /= qnorm;
932  q[3] /= qnorm;
933  trans.R.initFromQuaternion(q);
934  }
935 
936  if (adjustScale) {
937  double sumNormP = (*Bp - *Ap).norm() + (*Cp - *Bp).norm() +
938  (*Ap - *Cp).norm();
939  sumNormP *= aPrioriScale;
940  if (LessThanEpsilon(sumNormP)) {
941  return false;
942  }
943  double sumNormX = (*Bx - *Ax).norm() + (*Cx - *Bx).norm() +
944  (*Ax - *Cx).norm();
945  trans.s = static_cast<PointCoordinateType>(
946  sumNormX / sumNormP); // sumNormX / (sumNormP * Sa) in fact
947  }
948 
949  // we deduce the first translation
950  // #26 in besl paper, modified with the scale as in jschmidt
951  trans.T = Gx.toDouble() - (trans.R * Gp) * (aPrioriScale * trans.s);
952 
953  // we need to find the rotation in the (X) plane now
954  {
955  CCVector3 App = trans.apply(*Ap);
956  CCVector3 Bpp = trans.apply(*Bp);
957  CCVector3 Cpp = trans.apply(*Cp);
958 
959  double C = 0;
960  double S = 0;
961  CCVector3 Ssum(0, 0, 0);
962  CCVector3 rx;
963  CCVector3 rp;
964 
965  rx = *Ax - Gx;
966  rp = App - Gx;
967  C = rx.dot(rp);
968  Ssum = rx.cross(rp);
969 
970  rx = *Bx - Gx;
971  rp = Bpp - Gx;
972  C += rx.dot(rp);
973  Ssum += rx.cross(rp);
974 
975  rx = *Cx - Gx;
976  rp = Cpp - Gx;
977  C += rx.dot(rp);
978  Ssum += rx.cross(rp);
979 
980  S = Ssum.dot(Nx);
981  double Q = sqrt(S * S + C * C);
982  if (LessThanEpsilon(Q)) {
983  return false;
984  }
985 
986  PointCoordinateType sin_t = static_cast<PointCoordinateType>(S / Q);
987  PointCoordinateType cos_t = static_cast<PointCoordinateType>(C / Q);
988  PointCoordinateType inv_cos_t = 1 - cos_t;
989 
990  const PointCoordinateType& l1 = Nx.x;
991  const PointCoordinateType& l2 = Nx.y;
992  const PointCoordinateType& l3 = Nx.z;
993 
994  PointCoordinateType l1_inv_cos_t = l1 * inv_cos_t;
995  PointCoordinateType l3_inv_cos_t = l3 * inv_cos_t;
996 
997  SquareMatrix R(3);
998  // 1st column
999  R.m_values[0][0] = cos_t + l1 * l1_inv_cos_t;
1000  R.m_values[0][1] = l2 * l1_inv_cos_t + l3 * sin_t;
1001  R.m_values[0][2] = l3 * l1_inv_cos_t - l2 * sin_t;
1002 
1003  // 2nd column
1004  R.m_values[1][0] = l2 * l1_inv_cos_t - l3 * sin_t;
1005  R.m_values[1][1] = cos_t + l2 * l2 * inv_cos_t;
1006  R.m_values[1][2] = l2 * l3_inv_cos_t + l1 * sin_t;
1007 
1008  // 3rd column
1009  R.m_values[2][0] = l3 * l1_inv_cos_t + l2 * sin_t;
1010  R.m_values[2][1] = l2 * l3_inv_cos_t - l1 * sin_t;
1011  R.m_values[2][2] = cos_t + l3 * l3_inv_cos_t;
1012 
1013  trans.R = R * trans.R;
1014  // update T as well
1015  trans.T = Gx.toDouble() - (trans.R * Gp) * (aPrioriScale * trans.s);
1016  }
1017  } else {
1018  CCVector3 bbMin;
1019  CCVector3 bbMax;
1020  X->getBoundingBox(bbMin, bbMax);
1021 
1022  // if the data cloud is equivalent to a single point (for instance
1023  // it's the case when the two clouds are very far away from
1024  // each other in the ICP process) we try to get the two clouds closer
1025  CCVector3 diag = bbMax - bbMin;
1026  if (LessThanEpsilon(std::abs(diag.x) + std::abs(diag.y) +
1027  std::abs(diag.z))) {
1028  trans.T = Gx - Gp * aPrioriScale;
1029  return true;
1030  }
1031 
1032  // Cross covariance matrix, eq #24 in Besl92 (but with weights, if any)
1033  SquareMatrixd Sigma_px =
1034  (coupleWeights
1037  P, X, Gp, Gx, coupleWeights)
1039  ComputeCrossCovarianceMatrix(P, X, Gp, Gx));
1040  if (!Sigma_px.isValid()) return false;
1041 
1042  // transpose sigma_px
1043  SquareMatrixd Sigma_px_t = Sigma_px.transposed();
1044 
1045  SquareMatrixd Aij = Sigma_px - Sigma_px_t;
1046 
1047  double trace = Sigma_px.trace(); // that is the sum of diagonal
1048  // elements of sigma_px
1049 
1050  SquareMatrixd traceI3(
1051  3); // create the I matrix with eigvals equal to trace
1052  traceI3.m_values[0][0] = trace;
1053  traceI3.m_values[1][1] = trace;
1054  traceI3.m_values[2][2] = trace;
1055 
1056  SquareMatrixd bottomMat = Sigma_px + Sigma_px_t - traceI3;
1057 
1058  // we build up the registration matrix (see ICP algorithm)
1059  SquareMatrixd QSigma(4); // #25 in the paper (besl)
1060 
1061  QSigma.m_values[0][0] = trace;
1062 
1063  QSigma.m_values[0][1] = QSigma.m_values[1][0] = Aij.m_values[1][2];
1064  QSigma.m_values[0][2] = QSigma.m_values[2][0] = Aij.m_values[2][0];
1065  QSigma.m_values[0][3] = QSigma.m_values[3][0] = Aij.m_values[0][1];
1066 
1067  QSigma.m_values[1][1] = bottomMat.m_values[0][0];
1068  QSigma.m_values[1][2] = bottomMat.m_values[0][1];
1069  QSigma.m_values[1][3] = bottomMat.m_values[0][2];
1070 
1071  QSigma.m_values[2][1] = bottomMat.m_values[1][0];
1072  QSigma.m_values[2][2] = bottomMat.m_values[1][1];
1073  QSigma.m_values[2][3] = bottomMat.m_values[1][2];
1074 
1075  QSigma.m_values[3][1] = bottomMat.m_values[2][0];
1076  QSigma.m_values[3][2] = bottomMat.m_values[2][1];
1077  QSigma.m_values[3][3] = bottomMat.m_values[2][2];
1078 
1079  // we compute its eigenvalues and eigenvectors
1080  SquareMatrixd eigVectors;
1081  std::vector<double> eigValues;
1082  if (!Jacobi<double>::ComputeEigenValuesAndVectors(QSigma, eigVectors,
1083  eigValues, false)) {
1084  // failure
1085  return false;
1086  }
1087 
1088  // as Besl says, the best rotation corresponds to the eigenvector
1089  // associated to the biggest eigenvalue
1090  double qR[4];
1091  double maxEigValue = 0;
1092  Jacobi<double>::GetMaxEigenValueAndVector(eigVectors, eigValues,
1093  maxEigValue, qR);
1094 
1095  // these eigenvalue and eigenvector correspond to a quaternion --> we
1096  // get the corresponding matrix
1097  trans.R.initFromQuaternion(qR);
1098 
1099  if (adjustScale) {
1100  // two accumulators
1101  double acc_num = 0.0;
1102  double acc_denom = 0.0;
1103 
1104  // now deduce the scale (refer to "Point Set Registration with
1105  // Integrated Scale Estimation", Zinsser et. al, PRIP 2005)
1106  X->placeIteratorAtBeginning();
1108 
1109  unsigned count = X->size();
1110  assert(P->size() == count);
1111  for (unsigned i = 0; i < count; ++i) {
1112  //'a' refers to the data 'A' (moving) = P
1113  //'b' refers to the model 'B' (not moving) = X
1114  CCVector3d a_tilde =
1115  trans.R * (*(P->getNextPoint()) -
1116  Gp); // a_tilde_i = R * (a_i - a_mean)
1117  CCVector3d b_tilde = (*(X->getNextPoint()) -
1118  Gx); // b_tilde_j = (b_j - b_mean)
1119 
1120  acc_num += b_tilde.dot(a_tilde);
1121  acc_denom += a_tilde.dot(a_tilde);
1122  }
1123 
1124  // DGM: acc_2 can't be 0 because we already have checked that the
1125  // bbox is not a single point!
1126  assert(acc_denom > 0.0);
1127  trans.s = static_cast<PointCoordinateType>(
1128  std::abs(acc_num / acc_denom));
1129  }
1130 
1131  // and we deduce the translation
1132  // #26 in besl paper, modified with the scale as in jschmidt
1133  trans.T = Gx.toDouble() - (trans.R * Gp) * (aPrioriScale * trans.s);
1134  }
1135 
1136  return true;
1137 }
1138 
 1140  GenericIndexedCloud* dataCloud,
 1141  ScaledTransformation& transform,
 1142  ScalarType delta,
 1143  ScalarType beta,
 1144  PointCoordinateType overlap,
 1145  unsigned nbBases,
 1146  unsigned nbTries,
 1147  GenericProgressCallback* progressCb,
 1148  unsigned nbMaxCandidates) {
 1149  // DGM: KDTree::buildFromCloud will call reset right away!
 1150  // if (progressCb)
 1151  //{
 1152  // if (progressCb->textCanBeEdited())
 1153  // {
 1154  // progressCb->setMethodTitle("Clouds registration");
 1155  // progressCb->setInfo("Starting 4PCS");
 1156  // }
 1157  // progressCb->update(0);
 1158  // progressCb->start();
 1159  // }
 1160 
 1161  // Initialize random seed with current time
 1162  srand(static_cast<unsigned>(time(nullptr)));
 1163 
 1164  unsigned bestScore = 0, score = 0; // best/current registration score over all trial bases
 1165  transform.R.invalidate();
 1166  transform.T = CCVector3(0, 0, 0);
 1167 
 1168  // Adapt overlap to the model cloud size
 1169  {
 1170  CCVector3 bbMin, bbMax;
 1171  modelCloud->getBoundingBox(bbMin, bbMax);
 1172  CCVector3 diff = bbMax - bbMin;
 1173  overlap *= diff.norm() / 2; // overlap ratio -> absolute distance (half the bbox diagonal)
 1174  }
 1175 
 1176  // Build the associated KDtrees (manually owned: every exit path below must delete them)
 1177  KDTree* dataTree = new KDTree();
 1178  if (!dataTree->buildFromCloud(dataCloud, progressCb)) {
 1179  delete dataTree;
 1180  return false;
 1181  }
 1182  KDTree* modelTree = new KDTree();
 1183  if (!modelTree->buildFromCloud(modelCloud, progressCb)) {
 1184  delete dataTree;
 1185  delete modelTree;
 1186  return false;
 1187  }
 1188 
 1189  // if (progressCb)
 1190  // progressCb->stop();
 1191 
 1192  for (unsigned i = 0; i < nbBases; i++) {
 1193  // Randomly find the current reference base
 1194  Base reference;
 1195  if (!FindBase(modelCloud, overlap, nbTries, reference)) continue;
 1196 
 1197  // Search for all the congruent bases in the second cloud
 1198  std::vector<Base> candidates;
 1199  unsigned count = dataCloud->size();
 1200  candidates.reserve(count);
 1201  if (candidates.capacity() < count) // not enough memory
 1202  {
 1203  delete dataTree;
 1204  delete modelTree;
 1205  transform.R = SquareMatrix(); // leave an invalid rotation on failure
 1206  return false;
 1207  }
 1208  const CCVector3* referenceBasePoints[4];
 1209  {
 1210  for (unsigned j = 0; j < 4; j++)
 1211  referenceBasePoints[j] =
 1212  modelCloud->getPoint(reference.getIndex(j));
 1213  }
 1214  int result = FindCongruentBases(dataTree, beta, referenceBasePoints,
 1215  candidates);
 1216  if (result == 0)
 1217  continue;
 1218  else if (result < 0) // something bad happened!
 1219  {
 1220  delete dataTree;
 1221  delete modelTree;
 1222  transform.R = SquareMatrix();
 1223  return false;
 1224  }
 1225 
 1226  // Compute rigid transforms and filter bases if necessary
 1227  {
 1228  std::vector<ScaledTransformation> transforms;
 1229  if (!FilterCandidates(modelCloud, dataCloud, reference, candidates,
 1230  nbMaxCandidates, transforms)) {
 1231  delete dataTree;
 1232  delete modelTree;
 1233  transform.R = SquareMatrix();
 1234  return false;
 1235  }
 1236 
 1237  for (unsigned j = 0; j < candidates.size(); j++) {
 1238  // Register the current candidate base with the reference base
 1239  const ScaledTransformation& RT = transforms[j];
 1240  // Apply the rigid transform to the data cloud and compute the
 1241  // registration score
 1242  if (RT.R.isValid()) {
 1243  score = ComputeRegistrationScore(modelTree, dataCloud,
 1244  delta, RT);
 1245 
 1246  // Keep parameters that lead to the best result
 1247  if (score > bestScore) {
 1248  transform.R = RT.R;
 1249  transform.T = RT.T;
 1250  bestScore = score;
 1251  }
 1252  }
 1253  }
 1254  }
 1255 
 1256  if (progressCb) {
 1257  if (progressCb->textCanBeEdited()) {
 1258  char buffer[256];
 1259  sprintf(buffer, "Trial %u/%u [best score = %u]\n", i + 1,
 1260  nbBases, bestScore);
 1261  progressCb->setInfo(buffer);
 1262  progressCb->update(((i + 1) * 100.0f) / nbBases);
 1263  }
 1264 
 1265  if (progressCb->isCancelRequested()) {
 1266  delete dataTree;
 1267  delete modelTree;
 1268  transform.R = SquareMatrix();
 1269  return false;
 1270  }
 1271  }
 1272  }
 1273 
 1274  delete dataTree;
 1275  delete modelTree;
 1276 
 1277  if (progressCb) {
 1278  progressCb->stop();
 1279  }
 1280 
 1281  return (bestScore > 0); // success = at least one candidate base produced a positive score
 1282 }
1283 
 1285  KDTree* modelTree,
 1286  GenericIndexedCloud* dataCloud,
 1287  ScalarType delta,
 1288  const ScaledTransformation& dataToModel) {
 1289  CCVector3 Q;
 1290 
 1291  unsigned score = 0; // number of data points with a model neighbour closer than 'delta'
 1292 
 1293  unsigned count = dataCloud->size();
 1294  for (unsigned i = 0; i < count; ++i) {
 1295  dataCloud->getPoint(i, Q);
 1296  // Apply rigid transform to each point
 1297  Q = (dataToModel.R * Q + dataToModel.T).toPC();
 1298  // Check if there is a point in the model cloud that is close enough to
 1299  // Q (i.e. within distance 'delta')
 1300  if (modelTree->findPointBelowDistance(Q.u, delta)) score++;
 1301  }
 1302 
 1303  return score;
 1304 }
1305 
 1307  PointCoordinateType overlap,
 1308  unsigned nbTries,
 1309  Base& base) {
 1310  unsigned a, b, c, d;
 1311  unsigned i, size;
 1312  PointCoordinateType f, best, d0, d1, d2, x, y, z, w;
 1313  const CCVector3 *p0, *p1, *p2, *p3;
 1314  CCVector3 normal, u, v;
 1315 
 1316  overlap *= overlap; // squared: compared against squared distances (norm2) below
 1317  size = cloud->size();
 1318  best = 0.;
 1319  b = c = 0;
 1320  a = rand() % size;
 1321  p0 = cloud->getPoint(a);
 1322  // Randomly pick 3 points as spread apart as possible
 1323  for (i = 0; i < nbTries; i++) {
 1324  unsigned t1 = (rand() % size);
 1325  unsigned t2 = (rand() % size);
 1326  if (t1 == a || t2 == a || t1 == t2) continue;
 1327 
 1328  p1 = cloud->getPoint(t1);
 1329  p2 = cloud->getPoint(t2);
 1330  // Check that the selected points are not more than overlap-distant
 1331  // from p0
 1332  u = *p1 - *p0;
 1333  if (u.norm2() > overlap) continue;
 1334  u = *p2 - *p0;
 1335  if (u.norm2() > overlap) continue;
 1336 
 1337  // compute [p0, p1, p2] area thanks to cross product
 1338  x = ((p1->y - p0->y) * (p2->z - p0->z)) -
 1339  ((p1->z - p0->z) * (p2->y - p0->y));
 1340  y = ((p1->z - p0->z) * (p2->x - p0->x)) -
 1341  ((p1->x - p0->x) * (p2->z - p0->z));
 1342  z = ((p1->x - p0->x) * (p2->y - p0->y)) -
 1343  ((p1->y - p0->y) * (p2->x - p0->x));
 1344  // no need to compute the true area: f = ||cross||² = (2*area)² is sufficient for
 1345  // comparison
 1346  f = x * x + y * y + z * z;
 1347  if (f > best) {
 1348  b = t1;
 1349  c = t2;
 1350  best = f;
 1351  normal.x = x;
 1352  normal.y = y;
 1353  normal.z = z;
 1354  }
 1355  }
 1356 
 1357  if (b == c) return false; // no valid (non-degenerate) triplet was found
 1358 
 1359  // Once we found the points, we have to search for a fourth coplanar point
 1360  f = normal.norm();
 1361  if (f <= 0) return false;
 1362  normal *= 1.0f / f; // normalize
 1363  // plane equation : p lies in the plane if x*p[0] + y*p[1] + z*p[2] + w = 0
 1364  x = normal.x;
 1365  y = normal.y;
 1366  z = normal.z;
 1367  w = -(x * p0->x) - (y * p0->y) - (z * p0->z);
 1368  d = a;
 1369  best = -1.;
 1370  p1 = cloud->getPoint(b);
 1371  p2 = cloud->getPoint(c);
 1372  for (i = 0; i < nbTries; i++) {
 1373  unsigned t1 = (rand() % size);
 1374  if (t1 == a || t1 == b || t1 == c) continue;
 1375  p3 = cloud->getPoint(t1);
 1376  // p3 must be close enough to at least two other points (considering
 1377  // overlap)
 1378  d0 = (*p3 - *p0).norm2();
 1379  d1 = (*p3 - *p1).norm2();
 1380  d2 = (*p3 - *p2).norm2();
 1381  if ((d0 >= overlap && d1 >= overlap) ||
 1382  (d0 >= overlap && d2 >= overlap) ||
 1383  (d1 >= overlap && d2 >= overlap))
 1384  continue;
 1385  // Compute distance to the plane (cloud[a], cloud[b], cloud[c])
 1386  f = std::abs((x * p3->x) + (y * p3->y) + (z * p3->z) + w);
 1387  // keep the point which is the closest to the plane, while being as far
 1388  // as possible from the other three points
 1389  f = (f + 1.0f) / (sqrt(d0) + sqrt(d1) + sqrt(d2));
 1390  if ((best < 0.) || (f < best)) {
 1391  d = t1;
 1392  best = f;
 1393  }
 1394  }
 1395 
 1396  // Store the result in the base parameter
 1397  if (d != a) {
 1398  // Find the points order in the quadrilateral
 1399  p0 = cloud->getPoint(a);
 1400  p1 = cloud->getPoint(b);
 1401  p2 = cloud->getPoint(c);
 1402  p3 = cloud->getPoint(d);
 1403  // Search for the diagonals of the convex hull (3 tests max)
 1404  // Note : if the convex hull is made of 3 points, the points order has
 1405  // no importance
 1406  u = (*p1 - *p0) * (*p2 - *p0);
 1407  v = (*p1 - *p0) * (*p3 - *p0);
 1408  if (u.dot(v) <= 0) {
 1409  // p2 and p3 lie on both sides of [p0, p1]
 1410  base.init(a, b, c, d);
 1411  return true;
 1412  }
 1413  u = (*p2 - *p1) * (*p0 - *p1);
 1414  v = (*p2 - *p1) * (*p3 - *p1);
 1415  if (u.dot(v) <= 0) {
 1416  // p0 and p3 lie on both sides of [p2, p1]
 1417  base.init(b, c, d, a);
 1418  return true;
 1419  }
 1420  base.init(a, c, b, d);
 1421  return true;
 1422  }
 1423 
 1424  return false; // no fourth point found: base selection failed
 1425 }
1426 
 1427 // pair of point indexes (cloud point pairs, or intermediate-point matches)
 1428 using IndexPair = std::pair<unsigned, unsigned>;
1429 
 1431  ScalarType delta,
 1432  const CCVector3* base[4],
 1433  std::vector<Base>& results) {
 1434  // Compute reference base invariants (r1, r2)
 1435  PointCoordinateType r1, r2, d1, d2;
 1436  {
 1437  const CCVector3* p0 = base[0];
 1438  const CCVector3* p1 = base[1];
 1439  const CCVector3* p2 = base[2];
 1440  const CCVector3* p3 = base[3];
 1441 
 1442  d1 = (*p1 - *p0).norm();
 1443  d2 = (*p3 - *p2).norm();
 1444 
 1445  CCVector3 inter;
 1446  if (!LinesIntersections(*p0, *p1, *p2, *p3, inter, r1, r2)) return 0;
 1447  }
 1448 
 1449  GenericIndexedCloud* cloud = tree->getAssociatedCloud();
 1450 
 1451  // Find all pairs which are d1-apart and d2-apart
 1452  std::vector<IndexPair> pairs1, pairs2;
 1453  {
 1454  unsigned count = static_cast<unsigned>(cloud->size());
 1455  std::vector<unsigned> pointsIndexes;
 1456  try {
 1457  pointsIndexes.reserve(count);
 1458  } catch (...) {
 1459  // not enough memory
 1460  return -1;
 1461  }
 1462 
 1463  for (unsigned i = 0; i < count; i++) {
 1464  const CCVector3* q0 = cloud->getPoint(i);
 1465  IndexPair idxPair;
 1466  idxPair.first = i;
 1467  // Extract all points from the cloud which are d1-apart (up to
 1468  // delta) from q0
 1469  pointsIndexes.clear();
 1470  tree->findPointsLyingToDistance(q0->u, static_cast<ScalarType>(d1),
 1471  delta, pointsIndexes);
 1472  {
 1473  for (std::size_t j = 0; j < pointsIndexes.size(); j++) {
 1474  // As ||pi-pj|| = ||pj-pi||, we only take care of pairs that
 1475  // verify i<j
 1476  if (pointsIndexes[j] > i) {
 1477  idxPair.second = pointsIndexes[j];
 1478  pairs1.push_back(idxPair);
 1479  }
 1480  }
 1481  }
 1482  // Extract all points from the cloud which are d2-apart (up to
 1483  // delta) from q0
 1484  pointsIndexes.clear();
 1485  tree->findPointsLyingToDistance(q0->u, static_cast<ScalarType>(d2),
 1486  delta, pointsIndexes);
 1487  {
 1488  for (std::size_t j = 0; j < pointsIndexes.size(); j++) {
 1489  if (pointsIndexes[j] > i) {
 1490  idxPair.second = pointsIndexes[j];
 1491  pairs2.push_back(idxPair);
 1492  }
 1493  }
 1494  }
 1495  }
 1496  }
 1497 
 1498  // Select among the pairs the ones that can be congruent to the base "base"
 1499  std::vector<IndexPair> match;
 1500  {
 1501  PointCloud tmpCloud1, tmpCloud2;
 1502  {
 1503  unsigned count = static_cast<unsigned>(pairs1.size());
 1504  if (!tmpCloud1.reserve(count * 2)) // not enough memory
 1505  return -2;
 1506  for (unsigned i = 0; i < count; i++) {
 1507  // generate the two intermediate points from r1 in pairs1[i]
 1508  const CCVector3* q0 = cloud->getPoint(pairs1[i].first);
 1509  const CCVector3* q1 = cloud->getPoint(pairs1[i].second);
 1510  CCVector3 P1 = *q0 + r1 * (*q1 - *q0);
 1511  tmpCloud1.addPoint(P1);
 1512  CCVector3 P2 = *q1 + r1 * (*q0 - *q1);
 1513  tmpCloud1.addPoint(P2);
 1514  }
 1515  }
 1516 
 1517  {
 1518  unsigned count = static_cast<unsigned>(pairs2.size());
 1519  if (!tmpCloud2.reserve(count * 2)) // not enough memory
 1520  return -3;
 1521  for (unsigned i = 0; i < count; i++) {
 1522  // generate the two intermediate points from r2 in pairs2[i]
 1523  const CCVector3* q0 = cloud->getPoint(pairs2[i].first);
 1524  const CCVector3* q1 = cloud->getPoint(pairs2[i].second);
 1525  CCVector3 P1 = *q0 + r2 * (*q1 - *q0);
 1526  tmpCloud2.addPoint(P1);
 1527  CCVector3 P2 = *q1 + r2 * (*q0 - *q1);
 1528  tmpCloud2.addPoint(P2);
 1529  }
 1530  }
 1531 
 1532  // build a kd-tree on the intermediate points for fast nearest neighbour search
 1533  KDTree intermediateTree;
 1534  if (!intermediateTree.buildFromCloud(&tmpCloud1)) return -4;
 1535 
 1536  // Find matching (up to delta) intermediate points in tmpCloud1 and
 1537  // tmpCloud2
 1538  {
 1539  unsigned count = static_cast<unsigned>(tmpCloud2.size());
 1540  match.reserve(count);
 1541  if (match.capacity() < count) // not enough memory
 1542  return -5;
 1543 
 1544  for (unsigned i = 0; i < count; i++) {
 1545  const CCVector3* q0 = tmpCloud2.getPoint(i);
 1546  unsigned a;
 1547  if (intermediateTree.findNearestNeighbour(q0->u, a, delta)) {
 1548  IndexPair idxPair;
 1549  idxPair.first = i; // index in tmpCloud2 (built from pairs2)
 1550  idxPair.second = a; // index in tmpCloud1 (built from pairs1)
 1551  match.push_back(idxPair);
 1552  }
 1553  }
 1554  }
 1555  }
 1556 
 1557  // Find bases from matching intermediate points indexes
 1558  {
 1559  results.resize(0);
 1560  std::size_t count = match.size();
 1561  if (count > 0) {
 1562  try {
 1563  results.reserve(count);
 1564  } catch (const std::bad_alloc&) {
 1565  // not enough memory
 1566  return -6;
 1567  }
 1568  for (std::size_t i = 0; i < count; i++) {
 1569  Base quad;
 1570  unsigned b = match[i].second / 2; // each pair generated 2 intermediate points
 1571  if ((match[i].second % 2) == 0) {
 1572  quad.a = pairs1[b].first;
 1573  quad.b = pairs1[b].second;
 1574  } else {
 1575  quad.a = pairs1[b].second;
 1576  quad.b = pairs1[b].first;
 1577  }
 1578 
 1579  unsigned a = match[i].first / 2;
 1580  if ((match[i].first % 2) == 0) {
 1581  quad.c = pairs2[a].first;
 1582  quad.d = pairs2[a].second;
 1583  } else {
 1584  quad.c = pairs2[a].second;
 1585  quad.d = pairs2[a].first;
 1586  }
 1587  results.push_back(quad);
 1588  }
 1589  }
 1590  }
 1591 
 1592  return static_cast<int>(results.size()); // >= 0: number of candidate bases (negative = memory error)
 1593 }
1594 
 1596  const CCVector3& p1,
 1597  const CCVector3& p2,
 1598  const CCVector3& p3,
 1599  CCVector3& inter,
 1600  PointCoordinateType& lambda,
 1601  PointCoordinateType& mu) {
 1602  CCVector3 p02, p32, p10, A, B;
 1603  PointCoordinateType num, denom;
 1604 
 1605  // Find lambda and mu such that :
 1606  // A = p0+lambda(p1-p0)
 1607  // B = p2+mu(p3-p2)
 1608  //(lambda, mu) = argmin(||A-B||²)
 1609  p02 = p0 - p2;
 1610  p32 = p3 - p2;
 1611  p10 = p1 - p0;
 1612  num = (p02.dot(p32) * p32.dot(p10)) - (p02.dot(p10) * p32.dot(p32));
 1613  denom = (p10.dot(p10) * p32.dot(p32)) - (p32.dot(p10) * p32.dot(p10));
 1614  if (std::abs(denom) < 0.00001) return false; // near-parallel lines: no unique minimizer
 1615  lambda = num / denom;
 1616  num = p02.dot(p32) + (lambda * p32.dot(p10));
 1617  denom = p32.dot(p32);
 1618  if (std::abs(denom) < 0.00001) return false; // degenerate segment [p2, p3]
 1619  mu = num / denom;
 1620  A.x = p0.x + (lambda * p10.x);
 1621  A.y = p0.y + (lambda * p10.y);
 1622  A.z = p0.z + (lambda * p10.z);
 1623  B.x = p2.x + (mu * p32.x);
 1624  B.y = p2.y + (mu * p32.y);
 1625  B.z = p2.z + (mu * p32.z);
 1626  inter.x = (A.x + B.x) / 2.0f; // pseudo-intersection = midpoint of the two closest points A and B
 1627  inter.y = (A.y + B.y) / 2.0f;
 1628  inter.z = (A.z + B.z) / 2.0f;
 1629 
 1630  return true;
 1631 }
1632 
 1634  GenericIndexedCloud* modelCloud,
 1635  GenericIndexedCloud* dataCloud,
 1636  Base& reference,
 1637  std::vector<Base>& candidates,
 1638  unsigned nbMaxCandidates,
 1639  std::vector<ScaledTransformation>& transforms) {
 1640  std::vector<Base> table;
 1641  std::vector<float> scores, sortedscores;
 1642  const CCVector3* p[4];
 1644  std::vector<ScaledTransformation> tarray;
 1645  PointCloud referenceBaseCloud, dataBaseCloud;
 1646 
 1647  unsigned candidatesCount = static_cast<unsigned>(candidates.size());
 1648  if (candidatesCount == 0) return false;
 1649 
 1650  bool filter = (nbMaxCandidates > 0 && candidatesCount > nbMaxCandidates);
 1651  {
 1652  try {
 1653  table.resize(candidatesCount);
 1654  } catch (... /*const std::bad_alloc&*/) // out of memory
 1655  {
 1656  return false;
 1657  }
 1658  for (unsigned i = 0; i < candidatesCount; i++)
 1659  table[i].copy(candidates[i]);
 1660  }
 1661 
 1662  if (!referenceBaseCloud.reserve(4)) // we never know ;)
 1663  return false;
 1664 
 1665  {
 1666  for (unsigned j = 0; j < 4; j++) {
 1667  p[j] = modelCloud->getPoint(reference.getIndex(j));
 1668  referenceBaseCloud.addPoint(*p[j]);
 1669  }
 1670  }
 1671 
 1672  try {
 1673  scores.reserve(candidatesCount);
 1674  sortedscores.reserve(candidatesCount);
 1675  tarray.reserve(candidatesCount);
 1676  transforms.reserve(candidatesCount);
 1677  } catch (... /*const std::bad_alloc&*/) // out of memory
 1678  {
 1679  return false;
 1680  }
 1681 
 1682  // enough memory?
 1683  if (scores.capacity() < candidatesCount ||
 1684  sortedscores.capacity() < candidatesCount ||
 1685  tarray.capacity() < candidatesCount ||
 1686  transforms.capacity() < candidatesCount) {
 1687  return false;
 1688  }
 1689 
 1690  {
 1691  for (unsigned i = 0; i < table.size(); i++) {
 1692  dataBaseCloud.reset();
 1693  if (!dataBaseCloud.reserve(4)) // we never know ;)
 1694  return false;
 1695  for (unsigned j = 0; j < 4; j++)
 1696  dataBaseCloud.addPoint(
 1697  *dataCloud->getPoint(table[i].getIndex(j)));
 1698 
 1700  &dataBaseCloud, &referenceBaseCloud, t, false))
 1701  return false;
 1702 
 1703  tarray.push_back(t);
 1704  if (filter) {
 1705  float score = 0;
 1706  GenericIndexedCloud* b =
 1708  &dataBaseCloud, t);
 1709  if (!b) return false; // not enough memory
 1710  for (unsigned j = 0; j < 4; j++) {
 1711  const CCVector3* q = b->getPoint(j);
 1712  score += static_cast<float>((*q - *(p[j])).norm());
 1713  }
 1714  delete b;
 1715  scores.push_back(score); // score = sum of residual distances of the 4 registered base points
 1716  sortedscores.push_back(score);
 1717  }
 1718  }
 1719  }
 1720 
 1721  if (filter) {
 1722  transforms.resize(0);
 1723  try {
 1724  candidates.resize(nbMaxCandidates);
 1725  } catch (... /*const std::bad_alloc&*/) // out of memory
 1726  {
 1727  return false;
 1728  }
 1729 
 1730  // Sort the scores in ascending order and only keep the nbMaxCandidates
 1731  // smallest scores
 1732  sort(sortedscores.begin(), sortedscores.end());
 1733  float score = sortedscores[nbMaxCandidates - 1];
 1734  unsigned j = 0;
 1735  for (unsigned i = 0; i < scores.size(); i++) {
 1736  if (scores[i] <= score && j < nbMaxCandidates) {
 1737  candidates[i].copy(table[i]); // NOTE(review): 'candidates' was resized to nbMaxCandidates but is indexed with 'i', which can exceed it; 'candidates[j]' looks intended — verify
 1738  transforms.push_back(tarray[i]);
 1739  j++;
 1740  }
 1741  }
 1742  } else {
 1743  transforms = tarray;
 1744  }
 1745 
 1746  return true;
 1747 }
constexpr PointCoordinateType PC_ONE
'1' as a PointCoordinateType value
Definition: CVConst.h:67
constexpr double ZERO_TOLERANCE_D
Definition: CVConst.h:49
Vector3Tpl< PointCoordinateType > CCVector3
Default 3D Vector.
Definition: CVGeom.h:798
float PointCoordinateType
Type of the coordinates of a (N-D) point.
Definition: CVTypes.h:16
double normal[3]
int size
int count
#define ParallelSort
Definition: ParallelSort.h:33
void * X
Definition: SmallVector.cpp:45
core::Tensor result
Definition: VtkUtils.cpp:76
bool copy
Definition: VtkUtils.cpp:74
Garbage container (automatically deletes pointers when destroyed)
Definition: Garbage.h:15
void add(C *item)
Puts an item in the trash.
Definition: Garbage.h:18
void destroy(C *item)
To manually delete an item already in the trash.
Definition: Garbage.h:32
Jacobi eigen vectors/values decomposition.
Definition: Jacobi.h:23
static bool GetMaxEigenValueAndVector(const SquareMatrix &eigenVectors, const EigenValues &eigenValues, Scalar &maxEigenValue, Scalar maxEigenVector[])
Returns the biggest eigenvalue and its associated eigenvector.
Definition: Jacobi.h:385
Type y
Definition: CVGeom.h:137
Type u[3]
Definition: CVGeom.h:139
Type x
Definition: CVGeom.h:137
Type z
Definition: CVGeom.h:137
double normd() const
Returns vector norm (forces double precision output)
Definition: CVGeom.h:426
Type dot(const Vector3Tpl &v) const
Dot product.
Definition: CVGeom.h:408
Type norm2() const
Returns vector square norm.
Definition: CVGeom.h:417
Type norm() const
Returns vector norm.
Definition: CVGeom.h:424
Vector3Tpl cross(const Vector3Tpl &v) const
Cross product.
Definition: CVGeom.h:412
Vector3Tpl< double > toDouble() const
Cast operator to a double vector (explicit call version)
Definition: CVGeom.h:255
static ReferenceCloud * subsampleCloudRandomly(GenericIndexedCloudPersist *cloud, unsigned newNumberOfPoints, GenericProgressCallback *progressCb=nullptr)
Subsamples a point cloud (process based on random selections)
The octree structure used throughout the library.
Definition: DgmOctree.h:39
unsigned char findBestLevelForComparisonWithOctree(const DgmOctree *theOtherOctree) const
Definition: DgmOctree.cpp:2691
int build(GenericProgressCallback *progressCb=nullptr)
Builds the structure.
Definition: DgmOctree.cpp:196
static int computeCloud2MeshDistances(GenericIndexedCloudPersist *pointCloud, GenericIndexedMesh *mesh, Cloud2MeshDistancesComputationParams &params, GenericProgressCallback *progressCb=nullptr, DgmOctree *cloudOctree=nullptr)
Computes the distance between a point cloud and a mesh.
static int computeCloud2CloudDistances(GenericIndexedCloudPersist *comparedCloud, GenericIndexedCloudPersist *referenceCloud, Cloud2CloudDistancesComputationParams &params, GenericProgressCallback *progressCb=nullptr, DgmOctree *compOctree=nullptr, DgmOctree *refOctree=nullptr)
static bool FilterCandidates(GenericIndexedCloud *modelCloud, GenericIndexedCloud *dataCloud, Base &reference, std::vector< Base > &candidates, unsigned nbMaxCandidates, std::vector< ScaledTransformation > &transforms)
static int FindCongruentBases(KDTree *tree, ScalarType delta, const CCVector3 *base[4], std::vector< Base > &results)
static bool RegisterClouds(GenericIndexedCloud *modelCloud, GenericIndexedCloud *dataCloud, ScaledTransformation &transform, ScalarType delta, ScalarType beta, PointCoordinateType overlap, unsigned nbBases, unsigned nbTries, GenericProgressCallback *progressCb=nullptr, unsigned nbMaxCandidates=0)
Registers two point clouds.
static bool LinesIntersections(const CCVector3 &p0, const CCVector3 &p1, const CCVector3 &p2, const CCVector3 &p3, CCVector3 &inter, PointCoordinateType &lambda, PointCoordinateType &mu)
Find the 3D pseudo intersection between two lines.
static unsigned ComputeRegistrationScore(KDTree *modelTree, GenericIndexedCloud *dataCloud, ScalarType delta, const ScaledTransformation &dataToModel)
Registration score computation function.
static bool FindBase(GenericIndexedCloud *cloud, PointCoordinateType overlap, unsigned nbTries, Base &base)
Randomly finds a 4 points base in a cloud.
virtual void getBoundingBox(CCVector3 &bbMin, CCVector3 &bbMax)=0
Returns the cloud bounding box.
virtual void placeIteratorAtBeginning()=0
Sets the cloud iterator at the beginning.
virtual const CCVector3 * getNextPoint()=0
Returns the next point (relatively to the global iterator position)
virtual unsigned size() const =0
Returns the number of points.
virtual bool isValid() const
Indicates if the distribution parameters are valid.
A generic 3D point cloud with index-based and persistent access to points.
A generic 3D point cloud with index-based point access.
virtual const CCVector3 * getPoint(unsigned index) const =0
Returns the ith point.
A generic mesh with index-based vertex access.
virtual void stop()=0
Notifies the fact that the process has ended.
virtual void setInfo(const char *infoStr)=0
Notifies some information about the ongoing process.
virtual void setMethodTitle(const char *methodTitle)=0
Notifies the algorithm title.
virtual bool textCanBeEdited() const
Returns whether the dialog title and info can be updated or not.
virtual void update(float percent)=0
Notifies the algorithm progress.
virtual bool isCancelRequested()=0
Checks if the process should be canceled.
static SquareMatrixd ComputeWeightedCrossCovarianceMatrix(GenericCloud *P, GenericCloud *Q, const CCVector3 &pGravityCenter, const CCVector3 &qGravityCenter, ScalarField *coupleWeights=nullptr)
static CCVector3 ComputeGravityCenter(const GenericCloud *theCloud)
Computes the gravity center of a point cloud.
static CCVector3 ComputeWeightedGravityCenter(GenericCloud *theCloud, ScalarField *weights)
Computes the weighted gravity center of a point cloud.
static SquareMatrixd ComputeCrossCovarianceMatrix(GenericCloud *P, GenericCloud *Q, const CCVector3 &pGravityCenter, const CCVector3 &qGravityCenter)
Computes the cross covariance matrix between two clouds (same size)
static bool FindAbsoluteOrientation(GenericCloud *lCloud, GenericCloud *rCloud, ScaledTransformation &trans, bool fixedScale=false)
static double ComputeRMS(GenericCloud *lCloud, GenericCloud *rCloud, const ScaledTransformation &trans)
Computes RMS between two clouds given a transformation and a scale.
static RESULT_TYPE Register(GenericIndexedCloudPersist *modelCloud, GenericIndexedMesh *modelMesh, GenericIndexedCloudPersist *dataCloud, const Parameters &params, ScaledTransformation &totalTrans, double &finalRMS, unsigned &finalPointCount, GenericProgressCallback *progressCb=nullptr)
Registers two clouds or a cloud and a mesh.
unsigned findPointsLyingToDistance(const PointCoordinateType *queryPoint, ScalarType distance, ScalarType tolerance, std::vector< unsigned > &points)
Definition: CVKdTree.cpp:347
bool findNearestNeighbour(const PointCoordinateType *queryPoint, unsigned &nearestPointIndex, ScalarType maxDist)
Nearest point search.
Definition: CVKdTree.cpp:178
bool findPointBelowDistance(const PointCoordinateType *queryPoint, ScalarType maxDist)
Optimized version of nearest point search method.
Definition: CVKdTree.cpp:301
GenericIndexedCloud * getAssociatedCloud() const
Gets the point cloud from which the tree has been build.
Definition: CVKdTree.h:40
bool buildFromCloud(GenericIndexedCloud *cloud, GenericProgressCallback *progressCb=nullptr)
Builds the KD-tree.
Definition: CVKdTree.cpp:23
The Normal/Gaussian statistical distribution.
bool computeParameters(const ScalarContainer &values) override
Computes the distribution parameters from a set of values.
bool getParameters(ScalarType &_mu, ScalarType &_sigma2) const
Returns the distribution parameters.
void reset()
Clears the cloud database.
virtual bool reserve(unsigned newCapacity)
Reserves memory for the point database.
void addPoint(const CCVector3 &P)
Adds a 3D point to the database.
unsigned size() const override
Definition: PointCloudTpl.h:38
const CCVector3 * getPoint(unsigned index) const override
bool resize(unsigned newCount) override
Resizes the point database.
Definition: CVPointCloud.h:41
static PointCloud * applyTransformation(GenericCloud *cloud, Transformation &trans, GenericProgressCallback *progressCb=nullptr)
Applies a geometrical transformation to a point cloud.
A very simple point cloud (no point duplication)
virtual bool addPointIndex(unsigned globalIndex)
Point global index insertion mechanism.
unsigned size() const override
Returns the number of points.
virtual unsigned getPointGlobalIndex(unsigned localIndex) const
virtual bool resize(unsigned n)
Presets the size of the vector used to store point references.
virtual bool reserve(unsigned n)
Reserves some memory for hosting the point references.
PointProjectionTools::Transformation ScaledTransformation
Shortcut to PointProjectionTools::ScaledTransformation.
static bool RegistrationProcedure(GenericCloud *P, GenericCloud *X, ScaledTransformation &trans, bool adjustScale=false, ScalarField *coupleWeights=nullptr, PointCoordinateType aPrioriScale=1.0f, CCVector3 *Gp=nullptr, CCVector3 *Gx=nullptr)
ICP Registration procedure with optional scale estimation.
static void FilterTransformation(const ScaledTransformation &inTrans, int transformationFilters, const CCVector3 &toBeAlignedGravityCenter, const CCVector3 &referenceGravityCenter, ScaledTransformation &outTrans)
A simple scalar field (to be associated to a point cloud)
Definition: ScalarField.h:25
virtual void computeMinAndMax()
Determines the min and max values.
Definition: ScalarField.h:123
void addElement(ScalarType value)
Definition: ScalarField.h:99
ScalarType & getValue(std::size_t index)
Definition: ScalarField.h:92
void setValue(std::size_t index, ScalarType value)
Definition: ScalarField.h:96
bool reserveSafe(std::size_t count)
Reserves memory (no exception thrown)
Definition: ScalarField.cpp:71
bool resizeSafe(std::size_t count, bool initNewElements=false, ScalarType valueForNewElements=0)
Resizes memory (no exception thrown)
Definition: ScalarField.cpp:81
static bool ValidValue(ScalarType value)
Returns whether a scalar value is valid or not.
Definition: ScalarField.h:61
unsigned currentSize() const
Definition: ScalarField.h:100
void toIdentity()
Sets matrix to identity.
Definition: SquareMatrix.h:452
Scalar ** m_values
The matrix rows.
Definition: SquareMatrix.h:157
Scalar trace() const
Returns trace.
Definition: SquareMatrix.h:465
bool isValid() const
Returns matrix validity.
Definition: SquareMatrix.h:138
void invalidate()
Invalidates matrix.
Definition: SquareMatrix.h:143
SquareMatrixTpl transposed() const
Returns the transposed version of this matrix.
Definition: SquareMatrix.h:311
void initFromQuaternion(const float q[])
Creates a rotation matrix from a quaternion (float version)
Definition: SquareMatrix.h:480
void setValue(unsigned row, unsigned column, Scalar value)
Sets a particular matrix value.
Definition: SquareMatrix.h:163
void scale(Scalar coef)
Scales matrix (all elements are multiplied by the same coef.)
Definition: SquareMatrix.h:459
Scalar getValue(unsigned row, unsigned column) const
Returns a particular matrix value.
Definition: SquareMatrix.h:168
std::pair< unsigned, unsigned > IndexPair
a[190]
GraphType data
Definition: graph_cut.cc:138
normal_z y
normal_z x
normal_z z
::ccPointCloud PointCloud
Definition: PointCloud.h:19
Generic file read and write utility for python interface.
SquareMatrixTpl< PointCoordinateType > SquareMatrix
Default CC square matrix type (PointCoordinateType)
bool LessThanEpsilon(float x)
Test a floating point number against our epsilon (a very small number).
Definition: CVMath.h:23
flann::Index< flann::L2< float > > KDTree
ReferenceCloud * cloud
PointCloud * CPSetPlain
ScalarField * weights
PointCloud * rotatedCloud
ReferenceCloud * CPSetRef
GenericIndexedCloudPersist * cloud
ModelCloud(const ModelCloud &m)
ScalarField * weights
Cloud-to-cloud "Hausdorff" distance computation parameters.
ReferenceCloud * CPSet
Container of (references to) points to store the "Closest Point Set".
void init(unsigned _a, unsigned _b, unsigned _c, unsigned _d)
CONVERGENCE_TYPE convType
Convergence type.
int maxThreadCount
Maximum number of threads to use (0 = max)
ScalarField * dataWeights
Weights for data points (optional)
A scaled geometrical transformation (scale + rotation + translation)
CCVector3d apply(const CCVector3d &P) const
Applies the transformation to a point.