ACloudViewer 3.9.4
A Modern Library for 3D Data Processing
qCanupoProcess.cpp
1 // ----------------------------------------------------------------------------
2 // - CloudViewer: www.cloudViewer.org -
3 // ----------------------------------------------------------------------------
4 // Copyright (c) 2018-2024 www.cloudViewer.org
5 // SPDX-License-Identifier: MIT
6 // ----------------------------------------------------------------------------
7 
8 #include "qCanupoProcess.h"
9 
10 // local
11 #include "qCanupoTools.h"
12 
13 // CLOUDVIEWER
14 #include <ecvMainAppInterface.h>
15 
16 // cloudViewer
17 #include <CloudSamplingTools.h>
18 #include <ReferenceCloud.h>
19 
20 // CV_DB_LIB
21 #include <ecvOctree.h>
22 #include <ecvOctreeProxy.h>
23 #include <ecvPointCloud.h>
24 #include <ecvProgressDialog.h>
25 #include <ecvScalarField.h>
26 
27 // Qt
28 #include <QApplication>
29 #include <QMessageBox>
30 #include <QStringList>
31 
32 // Default SF names
33 #ifdef COMPILE_PRIVATE_CANUPO
34 static const char CANUPO_PER_LEVEL_ROUGHNESS_SF_NAME[] = "CANUPO.roughness";
35 #endif
36 static const char CANUPO_PER_LEVEL_ADDITIONAL_SF_NAME[] = "CANUPO.(x-y)";
37 
38 // Reserved name for CANUPO 'MSC' meta-data
39 static const char s_canupoMSCMetaData[] = "CanupoMSCData";
40 
41 // Tries to refine the classification (returns the new confidence if successful)
42 float RefinePointClassif(const Classifier& classifier,
43  const float confidence,
44  float& distToBoundary,
45  ccPointCloud* cloud,
46  ccOctree* octree,
47  unsigned char octreeLevel,
48  cloudViewer::GenericIndexedCloudPersist* corePoints,
49  cloudViewer::DgmOctree* corePointsOctree,
50  unsigned char coreOctreeLevel,
51  unsigned coreIndex,
52  PointCoordinateType largestRadius,
53  const std::vector<int>& corePointClasses) {
54  cloudViewer::ScalarField* sf = cloud->getCurrentDisplayedScalarField();
55  if (!sf) {
56  assert(false);
57  return confidence;
58  }
59 
60  try {
61  // find all scene data around that core point
62  cloudViewer::DgmOctree::NeighboursSet neighbors;
63  int n = octree->getPointsInSphericalNeighbourhood(
64  *corePoints->getPoint(coreIndex),
65  largestRadius, // we use the biggest neighborhood
66  neighbors, octreeLevel);
67 
68  // for each scene data point, find the corresponding core point and
69  // check if it is reliable
70  std::vector<ScalarType> class1SFValues, class2SFValues;
71  for (int j = 0; j < n; ++j) {
72  unsigned currentPointIndex = neighbors[j].pointIndex;
73  // look for the nearest 'core point' neighbor
74  unsigned nearestCoreIndex = 0;
75  if (corePoints == cloud) {
76  // if we used the whole cloud as core points, then each point is
77  // a core point!
78  nearestCoreIndex = currentPointIndex;
79  } else {
80  double maxSquareDist = 0;
81  cloudViewer::ReferenceCloud Yk(corePoints);
82  if (corePointsOctree->findPointNeighbourhood(
83  cloud->getPoint(currentPointIndex), &Yk, 1,
84  coreOctreeLevel, maxSquareDist) == 1) {
85  nearestCoreIndex = Yk.getPointGlobalIndex(0);
86  } else {
87  assert(false);
88  continue;
89  }
90  }
91 
92  int corePointClass = corePointClasses[nearestCoreIndex];
93  if (corePointClass != -1) {
94  const ScalarType& sfValue = sf->getValue(currentPointIndex);
95  if (corePointClass == classifier.class1)
96  class1SFValues.push_back(sfValue);
97  else if (corePointClass == classifier.class2)
98  class2SFValues.push_back(sfValue);
99  // else the extra info is irrelevant for this classifier pair
100  }
101  }
102 
103  // some local info? TODO: min size for considering this information is
104  // reliable?
105  size_t nsamples = class1SFValues.size() + class2SFValues.size();
106  if (nsamples == 0) {
107  // nothing to do
108  return confidence;
109  }
110 
111  // only one class?
112  if (class1SFValues.empty()) {
113  distToBoundary = static_cast<float>(class2SFValues.size()) / n;
114  if (distToBoundary < 0.5f) {
115  // too close
116  return confidence;
117  }
118  } else if (class2SFValues.empty()) {
119  distToBoundary = -static_cast<float>(class1SFValues.size()) /
120  n; // dist(class 1) < 0
121  if (distToBoundary > -0.5f) {
122  // too close
123  return confidence;
124  }
125  } else {
126  std::sort(class1SFValues.begin(), class1SFValues.end());
127  std::sort(class2SFValues.begin(), class2SFValues.end());
128 
129  std::vector<ScalarType>* smallestSet = &class1SFValues;
130  std::vector<ScalarType>* largestSet = &class2SFValues;
131 
132  if (class1SFValues.size() >= class2SFValues.size()) {
133  std::swap(smallestSet, largestSet);
134  }
135 
136  std::vector<ScalarType> bestSplit;
137  std::vector<int> bestSplitDir;
138  float bestConfidence = -1.0f;
139 
140  for (size_t k = 0; k < smallestSet->size(); ++k) {
141  size_t nsbelow = 0;
142  size_t nlabove = 0;
143 
144  // dichomed is the first index so that largestSet[dichomed] >=
145  // smallestSet[k]...
146  size_t dichomed =
147  std::upper_bound(largestSet->begin(), largestSet->end(),
148  smallestSet->at(k)) -
149  largestSet->begin();
150  if (dichomed == 0) {
151  nlabove = largestSet->size();
152  nsbelow = k + 1;
153  } else {
154  nlabove = largestSet->size() - dichomed;
155  nsbelow = k;
156  }
157 
158  // classification on either side, take largest and reverse roles
159  // if necessary
160  float c1 = static_cast<float>(nlabove) / largestSet->size() +
161  static_cast<float>(nsbelow) / smallestSet->size();
162  float c2 = static_cast<float>(largestSet->size() - nlabove) /
163  largestSet->size() +
164  static_cast<float>(smallestSet->size() - nsbelow) /
165  smallestSet->size();
166  float conf = std::max(c1, c2);
167  // no need to average for comparison purpose
168  if (bestConfidence < conf) {
169  bestSplit.clear();
170  bestSplitDir.clear();
171  bestConfidence = conf;
172  }
173 
174  if (qCanupoTools::Fpeq<float>(bestConfidence, conf)) {
175  bestSplit.push_back(
176  (smallestSet->at(k) +
177  largestSet->at(std::min(dichomed,
178  largestSet->size() - 1))) /
179  2);
180  bestSplitDir.push_back(c1 <= c2 ? 1 : 0);
181  }
182  }
183 
184  bestConfidence /= 2;
185  // see if we're improving estimated probability or not
186  // TODO: sometimes (rarely) there are mistakes in the reference core
187  // points and we're dealing with similar classes
188  // => put back these core points in the unreliable pool
189  if (/*old*/ confidence < bestConfidence) {
190  // take median best split
191  size_t bsi = bestSplit.size() / 2;
192  distToBoundary = sf->getValue(coreIndex) - bestSplit[bsi];
193  // reverse if necessary
194  if (bestSplitDir[bsi] == 1) distToBoundary = -distToBoundary;
195  // back to original vectors
196  if (class1SFValues.size() >= class2SFValues.size())
197  distToBoundary = -distToBoundary;
198  } else {
199  // no improvement
200  return confidence;
201  }
202  }
203 
204  // update confidence
205  float newConfidence =
206  1.0f / (exp(-fabs(distToBoundary)) + 1.0f); // in [0.5 ; 1]
207  newConfidence = 2 * (newConfidence - 0.5f); // map to [0;1]
208 
209  return newConfidence;
210  } catch (const std::bad_alloc&) {
211  return -1.0f;
212  }
213 
214  assert(false); // we shouldn't arrive here!
215  return confidence;
216 }
217 
218  bool qCanupoProcess::Classify(
219  QString classifierFilename,
220  const ClassifyParams& params,
221  ccPointCloud* cloud,
222  cloudViewer::GenericIndexedCloudPersist* corePoints,
223  CorePointDescSet& corePointsDescriptors,
224  ccPointCloud* realCorePoints /*=nullptr*/,
225  ecvMainAppInterface* app /*=nullptr*/,
226  QWidget* parentWidget /*=nullptr*/,
227  bool silent /*=false*/) {
228  // core points are mandatory
229  if (!cloud || !corePoints) {
230  assert(false);
231  return false;
232  }
233 
234  // load the classifier file
235  std::vector<Classifier> classifiers;
236  std::vector<float> scales;
237  unsigned descriptorID = DESC_DIMENSIONALITY;
238  {
239  QString error;
240  if (!Classifier::Load(classifierFilename, classifiers, scales, error)) {
241  if (app)
242  app->dispToConsole(QString("An error occurred: ") + error,
243  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
244  return false;
245  }
246  if (classifiers.empty()) {
247  if (app)
248  app->dispToConsole(QString("Invalid classifier file!"),
249  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
250  return false;
251  }
252 
253  // check that the descriptor ID of every classifier is handled by this
254  // version!
255  for (size_t i = 0; i < classifiers.size(); ++i) {
256  if (!ScaleParamsComputer::GetByID(classifiers[i].descriptorID)) {
257  if (app)
258  app->dispToConsole(
259  QString("Unhandled descriptor type! (ID = %1)")
260  .arg(classifiers[i].descriptorID),
261  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
262  return false;
263  }
264  if (i != 0 &&
265  classifiers[i].descriptorID != classifiers[0].descriptorID) {
266  if (app)
267  app->dispToConsole(
268  QString("Can't handle mixed descriptor types!"),
269  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
270  return false;
271  }
272  }
273  descriptorID = classifiers[0].descriptorID;
274  }
275 
276  // progress dialog
277  ecvProgressDialog pDlg(true, parentWidget);
278 
279  // does the core point cloud have associated meta-data?
280  QVariant mscMetaData;
281  bool useExistingMetaData = false;
282  if (realCorePoints) {
283  mscMetaData = realCorePoints->getMetaData(s_canupoMSCMetaData);
284  if (mscMetaData.isValid()) {
285  bool validMetaData = (mscMetaData.type() == QVariant::ByteArray &&
286  corePointsDescriptors.fromByteArray(
287  mscMetaData.toByteArray()));
288 
289  if (validMetaData) {
290  useExistingMetaData = true;
291  } else {
292  if (app)
293  app->dispToConsole(
294  "[qCanupo] Failed to read the core point cloud's "
295  "associated MSC meta-data?!",
296  ecvMainAppInterface::WRN_CONSOLE_MESSAGE);
297  mscMetaData.clear();
298  }
299  }
300  }
301 
302 #ifdef COMPILE_PRIVATE_CANUPO
303  // whether to compute per scale roughness for each core point!
304  bool generateRoughnessSF = cDlg.generateRoughnessSFsCheckBox->isChecked();
305  std::vector<ccScalarField*> coreRoughnessSFs;
306 #endif
307 
308  try {
309  for (int step = 0; step < 1; ++step) // fake loop for easy break
310  {
311  // check descriptors' scales (if already loaded)
312  bool computeDescriptors = true;
313  if (!corePointsDescriptors.scales().empty()) {
314  if (qCanupoTools::TestVectorsOverlap(
315  scales, corePointsDescriptors.scales()) <
316  scales.size()) {
317  if (!silent &&
318  QMessageBox::question(
319  parentWidget, "Scales mismatch",
320  "Available descriptors/scales data mismatch "
321  "with classifier's! Compute new descriptors or "
322  "cancel?",
323  QMessageBox::Yes,
324  QMessageBox::Cancel) == QMessageBox::Cancel) {
325  // cancel process
326  break;
327  }
328  } else {
329  computeDescriptors =
330  false; // no need to compute the descriptors as we
331  // already have them!
332  }
333 
334  // check descriptor type!
335  if (!computeDescriptors &&
336  corePointsDescriptors.descriptorID() != descriptorID) {
337  if (!silent &&
338  QMessageBox::question(
339  parentWidget, "Descriptor type mismatch",
340  "Available descriptors have been computed with "
341  "another descriptor type! Compute new "
342  "descriptors or cancel?",
343  QMessageBox::Yes,
344  QMessageBox::Cancel) == QMessageBox::Cancel) {
345  // cancel process
346  break;
347  } else {
348  // force new computation
349  computeDescriptors = true;
350  }
351  }
352  }
353 
354  // compute the original cloud octree
355  ccOctree::Shared octree = cloud->getOctree();
356  if (!octree) {
357  octree = cloud->computeOctree(&pDlg);
358  if (!octree) {
359  if (app)
360  app->dispToConsole(
361  "Failed to compute input cloud octree!",
362  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
363  break;
364  } else if (app && cloud->getParent()) {
365  app->addToDB(cloud->getOctreeProxy());
366  }
367  }
368 
369 #ifdef COMPILE_PRIVATE_CANUPO
370  computeDescriptors |= generateRoughnessSF;
371 #endif
372 
373  // let's compute the descriptors
374  if (computeDescriptors) {
375  // check that the selected descriptor (computer) is valid
376  {
377  assert(descriptorID != 0);
378  ScaleParamsComputer* computer =
379  ScaleParamsComputer::GetByID(descriptorID);
380  if (!computer) {
381  if (app)
382  app->dispToConsole(
383  QString("Internal error: unhandled "
384  "descriptor ID (%1)!")
385  .arg(descriptorID),
386  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
387  break;
388  }
389 
390  if (computer->needSF() &&
391  ((realCorePoints &&
392  realCorePoints->getCurrentDisplayedScalarField() ==
393  nullptr) ||
394  (!realCorePoints &&
395  cloud->getCurrentDisplayedScalarField() ==
396  nullptr) // if realCorePoints == 0, it means
397  // that the subsampled cloud
398  // couldn't be converted to a real
399  // cloud!
400  )) {
401  if (app)
402  app->dispToConsole(
403  QString("To compute this type of "
404  "descriptor, the core points cloud "
405  "must have an active scalar "
406  "field!"),
407  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
408  break;
409  }
410  }
411 
412 #ifdef COMPILE_PRIVATE_CANUPO
413  // test: create 1 sf per scale with roughness
414  if (generateRoughnessSF) {
415  size_t scaleCount = scales.size();
416  coreRoughnessSFs.resize(scaleCount, 0);
417  // for each scale
418  for (size_t s = 0; s < scaleCount; ++s) {
419  QString sfName =
420  QString(CANUPO_PER_LEVEL_ROUGHNESS_SF_NAME) +
421  QString(" @ scale %1").arg(scales[s]);
422 
423  coreRoughnessSFs[s] =
424  new ccScalarField(qPrintable(sfName));
425  if (!coreRoughnessSFs[s]->resize(corePoints->size(),
426  NAN_VALUE)) {
427  m_app->dispToConsole(
428  "Not enough memory to store per-level "
429  "roughness!",
430  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
431  generateRoughnessSF = false;
432  break;
433  }
434  }
435  }
436 #endif
437  // computes the 'descriptors'
438  bool invalidDescriptors = false;
439  QString errorStr;
440  if (!qCanupoTools::ComputeCorePointsDescriptors(
441  corePoints, corePointsDescriptors, cloud, scales,
442  invalidDescriptors, errorStr, descriptorID,
443  params.maxThreadCount, &pDlg, octree.data()
444 #ifdef COMPILE_PRIVATE_CANUPO
445  ,
446  generateRoughnessSF ? &coreRoughnessSFs : 0
447 #endif
448  )) {
449  if (app)
450  app->dispToConsole(
451  QString("Failed to compute core points "
452  "descriptors: %1")
453  .arg(errorStr),
454  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
455  break;
456  } else if (invalidDescriptors) {
457  if (app)
458  app->dispToConsole(
459  "[qCanupo] Some descriptors couldn't be "
460  "computed (min scale may be too small)!",
461  ecvMainAppInterface::WRN_CONSOLE_MESSAGE);
462  }
463  }
464 
465  // main classification process
466  {
467  // advanced options
468  assert(!params.useActiveSFForConfidence ||
469  cloud->getCurrentDisplayedScalarField() != nullptr);
470 
471  // core points octree
472  cloudViewer::DgmOctree* corePointsOctree = nullptr;
473  if (corePoints == cloud) {
474  corePointsOctree = octree.data();
475  } else {
476  corePointsOctree = new cloudViewer::DgmOctree(corePoints);
477  if (!corePointsOctree->build(&pDlg)) {
478  if (app)
479  app->dispToConsole(
480  "Failed to compute core points octree! "
481  "(not enough memory?)",
482  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
483  delete corePointsOctree;
484  corePointsOctree = nullptr;
485  break;
486  }
487  }
488  assert(corePointsOctree);
489 
490  const PointCoordinateType& largestRadius =
491  scales.front() / 2; // largest scale is the first by
492  // construction in canupo
493  unsigned char coreOctreeLevel =
494  corePointsOctree
495  ->findBestLevelForAGivenNeighbourhoodSizeExtraction(
496  params.samplingDist != 0
497  ? params.samplingDist
498  : largestRadius);
499  unsigned char octreeLevel =
500  octree->findBestLevelForAGivenNeighbourhoodSizeExtraction(
501  largestRadius);
502 
503  // core points class labels
504  std::vector<int> corePointClasses;
505  // core points confidence values
506  std::vector<float> corePointConfidences;
507 
508  // set the class of all core points that are far enough from
509  // hyperplane
510  bool processCanceled = false;
511 
512  size_t corePointCount = corePointsDescriptors.size();
513 
514  corePointClasses.resize(corePointCount, -1);
515  corePointConfidences.resize(corePointCount, 0.0f);
516 
517  // indexes of the points that haven't been classified yet
518  std::vector<unsigned> pendingPoints(corePointCount);
519  {
520  for (size_t i = 0; i < corePointCount; ++i)
521  pendingPoints[i] = static_cast<unsigned>(i);
522  }
523  std::vector<unsigned> unreliablePointIndexes;
524 
525  cloudViewer::ScalarField* sf =
526  cloud->getCurrentDisplayedScalarField();
527  assert(!params.useActiveSFForConfidence || sf);
528 
529  // while unreliable points remain
530  while (!pendingPoints.empty()) {
531  // progress notification
532  pDlg.reset();
533  pDlg.setInfo(QObject::tr("Remaining points to classify: "
534  "%1\nSource points: %2")
535  .arg(pendingPoints.size())
536  .arg(cloud->size()));
537  pDlg.setMethodTitle(QObject::tr("Classification"));
538  cloudViewer::NormalizedProgress nProgress(
539  &pDlg, corePoints->size());
540  pDlg.start();
541 
542  for (size_t i = 0; i < pendingPoints.size(); ++i) {
543  unsigned coreIndex = pendingPoints[i];
544  CorePointDesc& coreDesc =
545  corePointsDescriptors[coreIndex];
546 
547  // most common case
548  if (classifiers.size() == 1) {
549  const Classifier& classifier = classifiers.front();
550  float distToBoundary =
551  classifier.classify(coreDesc);
552 
553  float confidence =
554  1.0f / (exp(-fabs(distToBoundary)) +
555  1.0f); // in [0.5 ; 1]
556  confidence =
557  2 * (confidence - 0.5f); // map to [0;1]
558 
559  // unreliable point
560  bool unreliable = false;
561  if (confidence < params.confidenceThreshold) {
562  unreliable = true;
563  if (params.useActiveSFForConfidence) {
564  // use the scalar field to refine the
565  // classification
566  float newConfidence = RefinePointClassif(
567  classifier, confidence,
568  distToBoundary, cloud,
569  octree.data(), octreeLevel,
570  corePoints, corePointsOctree,
571  coreOctreeLevel, coreIndex,
572  largestRadius, corePointClasses);
573 
574  if (newConfidence < 0) {
575  if (app)
576  app->dispToConsole(
577  "Internal error: failed to "
578  "refine the classification "
579  "on the boundary (not "
580  "enough memory)",
581  ecvMainAppInterface::
582  ERR_CONSOLE_MESSAGE);
583  return false;
584  } else if (newConfidence > confidence) {
585  unreliable = false;
586  confidence = newConfidence;
587  }
588  }
589  }
590 
591  if (!unreliable) {
592  int theClass = (distToBoundary >= 0
593  ? classifier.class2
594  : classifier.class1);
595  corePointClasses[i] = theClass;
596  corePointConfidences[i] = confidence;
597  } else if (params.useActiveSFForConfidence) {
598  // this point can't be classified this way
599  unreliablePointIndexes.push_back(
600  static_cast<unsigned>(i));
601  }
602  } else // more than one classifier
603  {
604  std::map<int, int> votes;
605  std::map<int, float> minConfidences;
606 
607  // apply all classifiers and look for the most
608  // represented class
609  for (std::vector<Classifier>::const_iterator
610  classifierIt = classifiers.begin();
611  classifierIt != classifiers.end();
612  ++classifierIt) {
613  const Classifier& classifier = *classifierIt;
614 
615  // uniformize the order, distToBoundary>0
616  // selects the larger class of both
617  float distToBoundary = classifier.classify(
618  coreDesc); // DGM: the descriptors may
619  // have more values than the
620  // number of scales!
621  // if (classifier.class1 > classifier.class2)
622  // distToBoundary = -distToBoundary;
623 
624  // int minclass = std::min(classifier.class1,
625  // classifier.class2); int maxclass =
626  // std::max(classifier.class1,
627  // classifier.class2);
628 
629  // int minclass = classifier.class1;
630  // int maxclass = classifier.class2;
631 
632  float confidence =
633  1.0f / (exp(-fabs(distToBoundary)) +
634  1.0f); // in [0.5 ; 1]
635  confidence = 2 * (confidence -
636  0.5f); // map to [0;1]
637 
638  // unreliable point
639  if (confidence < params.confidenceThreshold) {
640  bool unreliable = true;
641  if (params.useActiveSFForConfidence) {
642  // use the scalar field to refine the
643  // classification
644  float newConfidence =
645  RefinePointClassif(
646  classifier, confidence,
647  distToBoundary, cloud,
648  octree.data(),
649  octreeLevel, corePoints,
650  corePointsOctree,
651  coreOctreeLevel,
652  coreIndex,
653  largestRadius,
654  corePointClasses);
655 
656  if (newConfidence < 0) {
657  if (app)
658  app->dispToConsole(
659  "Internal error: "
660  "failed to refine the "
661  "classification on the "
662  "boundary (not enough "
663  "memory)",
664  ecvMainAppInterface::
665  ERR_CONSOLE_MESSAGE);
666  return false;
667  } else if (newConfidence > confidence) {
668  unreliable = false;
669  confidence = newConfidence;
670  }
671  }
672 
673  if (unreliable) {
674  votes.clear();
675  break;
676  }
677  }
678 
679  int theClass = (distToBoundary >= 0
680  ? classifier.class2
681  : classifier.class1);
682  ++votes[theClass];
683 
684  // maintain the min confidence for each class
685  if (minConfidences.find(theClass) ==
686  minConfidences.end()) {
687  minConfidences[theClass] = confidence;
688  } else {
689  if (confidence < minConfidences[theClass])
690  minConfidences[theClass] = confidence;
691  }
692  }
693 
694  if (!votes.empty()) {
695  // search for max vote
696  std::vector<int> bestClasses;
697  int maxVoteCount = -1;
698  for (auto& vote : votes) {
699  int voteCount = vote.second;
700  if (maxVoteCount < voteCount) {
701  bestClasses.clear();
702  bestClasses.push_back(vote.first);
703  maxVoteCount = voteCount;
704  } else if (maxVoteCount == voteCount) {
705  bestClasses.push_back(vote.first);
706  }
707  }
708 
709  // in case of equality, use the distances to the
710  // decision boundary
711  int bestClassLabel = bestClasses.front();
712  if (bestClasses.size() > 1) {
713  for (size_t j = 1; j < bestClasses.size();
714  ++j) {
715  if (minConfidences[bestClasses[j]] >
716  minConfidences[bestClassLabel])
717  bestClassLabel = bestClasses[j];
718  }
719  }
720 
721  corePointClasses[i] = bestClassLabel;
722  corePointConfidences[i] =
723  minConfidences[bestClassLabel];
724  } else if (params.useActiveSFForConfidence) {
725  // this point can't be classified this way
726  unreliablePointIndexes.push_back(
727  static_cast<unsigned>(i));
728  }
729  }
730 
731  // progress notification
732  if (!nProgress.oneStep()) {
733  processCanceled = true;
734  break;
735  }
736  }
737 
738  // nothing has changed?
739  if (pendingPoints.size() == unreliablePointIndexes.size()) {
740  break;
741  }
742 
743  pendingPoints = unreliablePointIndexes;
744  unreliablePointIndexes.clear();
745  }
746 
747  if (processCanceled) {
748  if (app)
749  app->dispToConsole(
750  "[qCanupo] Process cancelled by user!",
751  ecvMainAppInterface::WRN_CONSOLE_MESSAGE);
752  break;
753  }
754 
755  // finally: label the points
756  {
757  // instantiate the scalar fields
758  cloudViewer::ScalarField* classLabelSF = nullptr;
759  int classLabelSFIdx = -1;
760  {
761  classLabelSFIdx = cloud->getScalarFieldIndexByName(
762  "CANUPO.class");
763  if (classLabelSFIdx < 0)
764  classLabelSFIdx =
765  cloud->addScalarField("CANUPO.class");
766  if (classLabelSFIdx >= 0) {
767  classLabelSF =
768  cloud->getScalarField(classLabelSFIdx);
769  classLabelSF->fill(NAN_VALUE);
770  } else {
771  if (app)
772  app->dispToConsole("Not enough memory!",
773  ecvMainAppInterface::
774  ERR_CONSOLE_MESSAGE);
775  break;
776  }
777  }
778 
779  cloudViewer::ScalarField* confidenceSF = nullptr;
780  int confidenceSFIdx = -1;
781  {
782  confidenceSFIdx = cloud->getScalarFieldIndexByName(
783  "CANUPO.confidence");
784  if (confidenceSFIdx < 0)
785  confidenceSFIdx =
786  cloud->addScalarField("CANUPO.confidence");
787  if (confidenceSFIdx >= 0) {
788  confidenceSF =
789  cloud->getScalarField(confidenceSFIdx);
790  confidenceSF->fill(NAN_VALUE);
791  } else if (app) {
792  app->dispToConsole(
793  "[qCanupo] Not enough memory to store the "
794  "confidence values!",
795  ecvMainAppInterface::WRN_CONSOLE_MESSAGE);
796  }
797  }
798 
799  // optional: create 1 sf per scale with 'x-y'
800  std::vector<ccScalarField*> scaleSFs;
801  bool generateAdditionalSF = params.generateAdditionalSF;
802  if (generateAdditionalSF &&
803  corePointsDescriptors.dimPerScale() != 2) {
804  if (app)
805  app->dispToConsole(
806  "[qCanupo] Per-level 'x-y' values can only "
807  "be extracted from descriptor with 2 "
808  "dimensions per scale!",
809  ecvMainAppInterface::WRN_CONSOLE_MESSAGE);
810  generateAdditionalSF = false;
811  }
812 
813  if (generateAdditionalSF) {
814  // remove any previously generated scalar field starting
815  // with CANUPO_PER_LEVEL_ADDITIONAL_SF_NAME
816  {
817  QStringList toDelete;
818  for (unsigned i = 0;
819  i < cloud->getNumberOfScalarFields(); ++i) {
820  QString sfName(
821  cloud->getScalarField(i)->getName());
822  if (sfName.startsWith(
823  CANUPO_PER_LEVEL_ADDITIONAL_SF_NAME))
824  toDelete << sfName;
825  }
826 
827  for (int j = 0; j < toDelete.size(); ++j)
828  cloud->deleteScalarField(
829  realCorePoints
830  ->getScalarFieldIndexByName(
831  qPrintable(
832  toDelete[j])));
833  }
834 
835  size_t scaleCount = scales.size();
836  scaleSFs.resize(scaleCount, nullptr);
837  // for each scale
838  for (size_t s = 0; s < scaleCount; ++s) {
839  QString sfName =
840  QString(CANUPO_PER_LEVEL_ADDITIONAL_SF_NAME) +
841  QString(" @ scale %1").arg(scales[s]);
842 
843  // SF with same name (if any) should have already
844  // been removed!
845  assert(cloud->getScalarFieldIndexByName(
846  qPrintable(sfName)) < 0);
847 
848  scaleSFs[s] = new ccScalarField(qPrintable(sfName));
849  if (!scaleSFs[s]->resizeSafe(cloud->size(), true,
850  NAN_VALUE)) {
851  if (app)
852  app->dispToConsole(
853  "Not enough memory to store "
854  "per-level 'x-y' values!",
855  ecvMainAppInterface::
856  ERR_CONSOLE_MESSAGE);
857  while (!scaleSFs.empty()) {
858  scaleSFs.back()->release();
859  scaleSFs.pop_back();
860  }
861  generateAdditionalSF = false;
862  break;
863  }
864  }
865  }
866 
867 #ifdef COMPILE_PRIVATE_CANUPO
868  // optional: create 1 sf per scale with roughness
869  std::vector<ccScalarField*> roughnessSFs;
870  if (generateRoughnessSF) {
871  // remove any previously generated scalar field starting
872  // with CANUPO_PER_LEVEL_ROUGHNESS_SF_NAME
873  {
874  QStringList toDelete;
875  for (unsigned i = 0;
876  i < realCorePoints->getNumberOfScalarFields();
877  ++i) {
878  QString sfName(
879  cloud->getScalarField(i)->getName());
880  if (sfName.startsWith(
881  CANUPO_PER_LEVEL_ROUGHNESS_SF_NAME))
882  toDelete << sfName;
883  }
884 
885  for (int j = 0; j < toDelete.size(); ++j)
886  cloud->deleteScalarField(
887  cloud->getScalarFieldIndexByName(
888  qPrintable(toDelete[j])));
889  }
890 
891  // if the output cloud has the same number of points as
892  // the core points cloud, no need to duplicate the
893  // scalar fields!
894  if (corePoints->size() == cloud->size()) {
895  for (size_t s = 0; s < coreRoughnessSFs.size();
896  ++s) {
897  // SF with same name (if any) should have
898  // already been removed!
899  assert(cloud->getScalarFieldIndexByName(
900  coreRoughnessSFs[s]->getName()) <
901  0);
902 
903  coreRoughnessSFs[s]->computeMinAndMax();
904  cloud->addScalarField(coreRoughnessSFs[s]);
905  }
906  coreRoughnessSFs.clear(); // don't want to release
907  // them anymore!
908  generateRoughnessSF =
909  false; // no need to bother anymore
910  } else {
911  size_t scaleCount = scales.size();
912  roughnessSFs.resize(scaleCount, 0);
913  assert(coreRoughnessSFs.size() ==
914  roughnessSFs.size());
915  // for each scale
916  for (size_t s = 0; s < scaleCount; ++s) {
917  // same name as the per-core points version
918  roughnessSFs[s] = new ccScalarField(
919  coreRoughnessSFs[s]->getName());
920 
921  // SF with same name (if any) should have
922  // already been removed!
923  assert(cloud->getScalarFieldIndexByName(
924  roughnessSFs[s]->getName()) < 0);
925 
926  if (!roughnessSFs[s]->resize(cloud->size(),
927  NAN_VALUE)) {
928  if (app)
929  app->dispToConsole(
930  "Not enough memory to store "
931  "per-level roughness!",
932  ecvMainAppInterface::
933  ERR_CONSOLE_MESSAGE);
934  while (!roughnessSFs.empty()) {
935  roughnessSFs.back()->release();
936  roughnessSFs.pop_back();
937  }
938  generateRoughnessSF = false;
939  break;
940  }
941  }
942  }
943  }
944 #endif
945 
946  // progress notification
947  pDlg.reset();
948  pDlg.setInfo(
949  QObject::tr("Core points: %1\nSource points: %2")
950  .arg(corePoints->size())
951  .arg(cloud->size()));
952  pDlg.setMethodTitle(QObject::tr("Labelling"));
953  cloudViewer::NormalizedProgress nProgress(&pDlg,
954  cloud->size());
955  pDlg.start();
956 
957  bool error = false;
958  cloudViewer::ReferenceCloud Yk(corePoints);
959  for (unsigned i = 0; i < cloud->size(); ++i) {
960  const CCVector3* P = cloud->getPoint(i);
961  // process this point
962  // first look for the nearest neighbor in core points
963  unsigned nearestCorePointIndex = 0;
964  if (corePoints == cloud) {
965  // if we used the whole cloud as core points, then
966  // each point is a core point
967  nearestCorePointIndex = i;
968  } else {
969  double maxSquareDist = 0;
970  Yk.clear(false);
971 
972  assert(corePointsOctree);
973  if (corePointsOctree->findPointNeighbourhood(
974  P, &Yk, 1, coreOctreeLevel,
975  maxSquareDist) == 1) {
976  nearestCorePointIndex =
977  Yk.getPointGlobalIndex(0);
978  } else {
979  assert(false);
980  error = true;
981  break;
982  }
983  }
984 
985  assert(classLabelSF);
986  ScalarType classVal = static_cast<ScalarType>(
987  corePointClasses[nearestCorePointIndex]);
988  if (classVal >= 0) classLabelSF->setValue(i, classVal);
989  // otherwise, it is already defaulted to NaN
990 
991  if (confidenceSF) {
992  ScalarType confVal = static_cast<ScalarType>(
993  corePointConfidences
994  [nearestCorePointIndex]);
995  confidenceSF->setValue(i, confVal);
996  }
997 
998  // save 'x-y' values
999  if (generateAdditionalSF) {
1000  unsigned dimPerScale =
1001  corePointsDescriptors.dimPerScale();
1002  assert(dimPerScale == 2);
1003  const CorePointDesc& desc = corePointsDescriptors
1004  [nearestCorePointIndex];
1005  assert(desc.params.size() ==
1006  scaleSFs.size() * dimPerScale);
1007  for (size_t s = 0; s < scaleSFs.size(); ++s) {
1008  ScalarType val =
1009  (desc.params[s * dimPerScale] -
1010  desc.params[s * dimPerScale + 1]);
1011  scaleSFs[s]->setValue(i, val);
1012  }
1013  }
1014 
1015 #ifdef COMPILE_PRIVATE_CANUPO
1016  // save roughness values
1017  if (generateRoughnessSF) {
1018  const CorePointDesc& desc = corePointsDescriptors
1019  [nearestCorePointIndex];
1020  assert(coreRoughnessSFs.size() ==
1021  roughnessSFs.size());
1022  for (size_t s = 0; s < roughnessSFs.size(); ++s) {
1023  const ScalarType& val =
1024  coreRoughnessSFs[s]->getValue(
1025  nearestCorePointIndex);
1026  roughnessSFs[s]->setValue(i, val);
1027  }
1028  }
1029 #endif
1030 
1031  // progress notification
1032  if (!nProgress.oneStep()) {
1033  processCanceled = true;
1034  break;
1035  }
1036  }
1037 
1038  if (error) {
1039  if (app)
1040  app->dispToConsole(
1041  "Internal error: failed to get nearest "
1042  "core points?!",
1043  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
1044  cloud->deleteScalarField(classLabelSFIdx);
1045  if (confidenceSFIdx >= 0)
1046  cloud->deleteScalarField(confidenceSFIdx);
1047  while (!scaleSFs.empty()) {
1048  scaleSFs.back()->release();
1049  scaleSFs.pop_back();
1050  }
1051  } else {
1052  classLabelSF->computeMinAndMax();
1053  cloud->setCurrentDisplayedScalarField(classLabelSFIdx);
1054 
1055  if (confidenceSF) confidenceSF->computeMinAndMax();
1056 
1057  if (generateAdditionalSF) {
1058  for (auto& scaleSF : scaleSFs) {
1059  scaleSF->computeMinAndMax();
1060  scaleSF->setSymmetricalScale(true);
1061  cloud->addScalarField(scaleSF);
1062  }
1063  }
1064 
1065 #ifdef COMPILE_PRIVATE_CANUPO
1066  if (generateRoughnessSF) {
1067  for (size_t s = 0; s < roughnessSFs.size(); ++s) {
1068  roughnessSFs[s]->computeMinAndMax();
1069  cloud->addScalarField(roughnessSFs[s]);
1070  }
1071  }
1072 #endif
1073  }
1074  cloud->showSF(cloud->getCurrentDisplayedScalarField() !=
1075  nullptr);
1076  }
1077 
1078  // dispose of octree
1079  if (corePointsOctree != octree) {
1080  delete corePointsOctree;
1081  corePointsOctree = nullptr;
1082  }
1083 
1084  // save MSC data as meta-data on the core point cloud
1085  if (realCorePoints && !useExistingMetaData) {
1086  bool proceed = true;
1087  if (mscMetaData.isValid()) {
1088  proceed = (silent ||
1089  QMessageBox::question(
1090  parentWidget,
1091  "Overwrite MSC meta-data?",
1092  "Core points cloud already has "
1093  "associated MSC meta-data, should "
1094  "we overwrite them?",
1095  QMessageBox::Yes, QMessageBox::No) ==
1096  QMessageBox::Yes);
1097  }
1098 
1099  if (proceed) {
1100  mscMetaData = corePointsDescriptors.toByteArray();
1101  if (mscMetaData.isValid()) {
1102  realCorePoints->setMetaData(s_canupoMSCMetaData,
1103  mscMetaData);
1104  if (app)
1105  app->dispToConsole(
1106  QString("[qCanupo] MSC descriptors "
1107  "have been saved as meta-data "
1108  "(cloud '%1')")
1109  .arg(realCorePoints
1110  ->getName()));
1111  } else {
1112  if (app)
1113  app->dispToConsole(
1114  "[qCanupo] Failed to save MSC "
1115  "meta-data (not enough memory?)",
1116  ecvMainAppInterface::
1117  WRN_CONSOLE_MESSAGE);
1118  }
1119  }
1120  }
1121  }
1122 
1123  break;
1124  }
1125  } catch (const std::bad_alloc&) {
1126  if (app)
1127  app->dispToConsole("Not enough memory",
1128  ecvMainAppInterface::ERR_CONSOLE_MESSAGE);
1129  return false;
1130  }
1131 
1132 #ifdef COMPILE_PRIVATE_CANUPO
1133  // release roughness SFs (if any)
1134  while (!coreRoughnessSFs.empty()) {
1135  coreRoughnessSFs.back()->release();
1136  coreRoughnessSFs.pop_back();
1137  }
1138 #endif
1139 
1140  return true;
1141 }
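
For reference, a minimal calling sketch for the Classify() entry point defined above. This is not part of the file: the qCanupoProcess:: scope and the public layout of ClassifyParams are assumptions inferred from this listing and qCanupoProcess.h; the fields shown are the ones the function actually reads (confidenceThreshold, useActiveSFForConfidence, generateAdditionalSF, samplingDist, maxThreadCount), and the threshold value is only an example.

// Hypothetical usage sketch (not part of qCanupoProcess.cpp).
#include "qCanupoProcess.h"

bool classifyWithCanupo(ccPointCloud* cloud,
                        cloudViewer::GenericIndexedCloudPersist* corePoints,
                        ccPointCloud* realCorePoints,
                        ecvMainAppInterface* app,
                        QWidget* parent) {
    ClassifyParams params;  // may need a qCanupoProcess:: qualifier if the struct is nested
    params.confidenceThreshold = 0.85f;      // points below this confidence are re-examined
    params.useActiveSFForConfidence = false; // no active-SF refinement of low-confidence points
    params.generateAdditionalSF = false;     // no per-scale 'x-y' scalar fields
    params.samplingDist = 0;                 // 0: core octree level derived from the largest classifier scale
    params.maxThreadCount = 0;               // thread count for descriptor computation (0: default)

    CorePointDescSet descriptors;            // filled (or reused from meta-data) by Classify()

    // On success, 'CANUPO.class' and 'CANUPO.confidence' scalar fields are added to 'cloud'.
    return qCanupoProcess::Classify("classifier.prm", params, cloud, corePoints,
                                    descriptors, realCorePoints, app, parent,
                                    /*silent=*/false);
}

As the listing shows, the descriptors computed during the call are also cached as 'CanupoMSCData' meta-data on realCorePoints (when provided), so a later call on the same core points can skip recomputation.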
constexpr ScalarType NAN_VALUE
NaN as a ScalarType value.
Definition: CVConst.h:76
float PointCoordinateType
Type of the coordinates of a (N-D) point.
Definition: CVTypes.h:16
cmdLineReadable * params[]
static const unsigned DESC_DIMENSIONALITY
Classifier.
Definition: classifier.h:23
static bool Load(QString filename, std::vector< Classifier > &classifiers, std::vector< float > &scales, QString &error, FileHeader *header=0, bool headerOnly=false)
Loads a CANUPO classifier file (.prm)
Definition: classifier.cpp:157
int class1
Definition: classifier.h:70
float classify(const CorePointDesc &mscdata) const
Classification in MSC space.
Definition: classifier.cpp:152
int class2
Definition: classifier.h:70
Set of (core) point descriptors.
const std::vector< float > & scales() const
Returns associated scales.
const unsigned descriptorID() const
Returns associated descriptor ID.
const unsigned dimPerScale() const
Returns the number of dimensions per scale.
QByteArray toByteArray() const
Converts structure to a byte array.
bool fromByteArray(const QByteArray &data)
Inits structure from a byte array.
Generic parameters 'computer' class (at a given scale)
static ScaleParamsComputer * GetByID(unsigned descID)
Vault: returns the computer corresponding to the given ID.
virtual bool needSF() const
Returns whether the computer requires a scalar field or not.
virtual void showSF(bool state)
Sets active scalarfield visibility.
virtual ccOctree::Shared computeOctree(cloudViewer::GenericProgressCallback *progressCb=nullptr, bool autoAddChild=true)
Computes the cloud octree.
virtual ccOctreeProxy * getOctreeProxy() const
Returns the associated octree proxy (if any)
virtual ccOctree::Shared getOctree() const
Returns the associated octree (if any)
ccHObject * getParent() const
Returns parent object.
Definition: ecvHObject.h:245
void setMetaData(const QString &key, const QVariant &data)
Sets a meta-data element.
QVariant getMetaData(const QString &key) const
Returns a given associated meta data.
Octree structure.
Definition: ecvOctree.h:27
QSharedPointer< ccOctree > Shared
Shared pointer.
Definition: ecvOctree.h:32
A 3D cloud and its associated features (color, normals, scalar fields, etc.)
void setCurrentDisplayedScalarField(int index)
Sets the currently displayed scalar field.
int addScalarField(const char *uniqueName) override
Creates a new scalar field and registers it.
void deleteScalarField(int index) override
Deletes a specific scalar field.
ccScalarField * getCurrentDisplayedScalarField() const
Returns the currently displayed scalar (or 0 if none)
A scalar field associated to display-related parameters.
The octree structure used throughout the library.
Definition: DgmOctree.h:39
unsigned findPointNeighbourhood(const CCVector3 *_queryPoint, ReferenceCloud *Yk, unsigned maxNumberOfNeighbors, unsigned char level, double &maxSquareDist, double maxSearchDist=0, int *finalNeighbourhoodSize=nullptr) const
Finds the nearest neighbours around a query point.
Definition: DgmOctree.cpp:721
unsigned char findBestLevelForAGivenNeighbourhoodSizeExtraction(PointCoordinateType radius) const
Definition: DgmOctree.cpp:2664
int getPointsInSphericalNeighbourhood(const CCVector3 &sphereCenter, PointCoordinateType radius, NeighboursSet &neighbours, unsigned char level) const
Returns the points falling inside a sphere.
Definition: DgmOctree.cpp:1846
int build(GenericProgressCallback *progressCb=nullptr)
Builds the structure.
Definition: DgmOctree.cpp:196
std::vector< PointDescriptor > NeighboursSet
A set of neighbours.
Definition: DgmOctree.h:133
virtual unsigned size() const =0
Returns the number of points.
A generic 3D point cloud with index-based and persistent access to points.
virtual const CCVector3 * getPoint(unsigned index) const =0
Returns the ith point.
bool oneStep()
Increments total progress value of a single unit.
int getScalarFieldIndexByName(const char *name) const
Returns the index of a scalar field represented by its name.
ScalarField * getScalarField(int index) const
Returns a pointer to a specific scalar field.
unsigned getNumberOfScalarFields() const
Returns the number of associated (and active) scalar fields.
unsigned size() const override
Definition: PointCloudTpl.h:38
const CCVector3 * getPoint(unsigned index) const override
A very simple point cloud (no point duplication)
virtual unsigned getPointGlobalIndex(unsigned localIndex) const
virtual void clear(bool releaseMemory=false)
Clears the cloud.
A simple scalar field (to be associated to a point cloud)
Definition: ScalarField.h:25
void fill(ScalarType fillValue=0)
Fills the array with a particular value.
Definition: ScalarField.h:77
virtual void computeMinAndMax()
Determines the min and max values.
Definition: ScalarField.h:123
ScalarType & getValue(std::size_t index)
Definition: ScalarField.h:92
void setValue(std::size_t index, ScalarType value)
Definition: ScalarField.h:96
const char * getName() const
Returns scalar field name.
Definition: ScalarField.h:43
Main application interface (for plugins)
virtual void addToDB(ccHObject *obj, bool updateZoom=false, bool autoExpandDBTree=true, bool checkDimensions=false, bool autoRedraw=true)=0
virtual void dispToConsole(QString message, ConsoleMessageLevel level=STD_CONSOLE_MESSAGE)=0
Graphical progress indicator (thread-safe)
virtual void start() override
virtual void setInfo(const char *infoStr) override
Notifies some information about the ongoing process.
virtual void setMethodTitle(const char *methodTitle) override
Notifies the algorithm title.
static bool Classify(QString classifierFilename, const ClassifyParams &params, ccPointCloud *cloud, cloudViewer::GenericIndexedCloudPersist *corePoints, CorePointDescSet &corePointsDescriptors, ccPointCloud *realCorePoints=nullptr, ecvMainAppInterface *app=nullptr, QWidget *parentWidget=nullptr, bool silent=false)
Classify a point cloud.
static size_t TestVectorsOverlap(const std::vector< float > &first, const std::vector< float > &second)
Tests whether a vector contains the values of another one (at the end!)
static bool ComputeCorePointsDescriptors(cloudViewer::GenericIndexedCloud *corePoints, CorePointDescSet &corePointsDescriptors, ccGenericPointCloud *sourceCloud, const std::vector< float > &sortedScales, bool &invalidDescriptors, QString &error, unsigned descriptorID=DESC_DIMENSIONALITY, int maxThreadCount=0, cloudViewer::GenericProgressCallback *progressCb=0, cloudViewer::DgmOctree *inputOctree=0, std::vector< ccScalarField * > *roughnessSFs=0)
Computes the 'descriptors' for various scales on core points only.
__host__ __device__ float2 fabs(float2 v)
Definition: cutil_math.h:1254
int min(int a, int b)
Definition: cutil_math.h:53
int max(int a, int b)
Definition: cutil_math.h:48
static void error(char *msg)
Definition: lsd.c:159
void swap(cloudViewer::core::SmallVectorImpl< T > &LHS, cloudViewer::core::SmallVectorImpl< T > &RHS)
Implement std::swap in terms of SmallVector swap.
Definition: SmallVector.h:1370
float RefinePointClassif(const Classifier &classifier, const float confidence, float &distToBoundary, ccPointCloud *cloud, ccOctree *octree, unsigned char octreeLevel, cloudViewer::GenericIndexedCloudPersist *corePoints, cloudViewer::DgmOctree *corePointsOctree, unsigned char coreOctreeLevel, unsigned coreIndex, PointCoordinateType largestRadius, const std::vector< int > &corePointClasses)
static const char s_canupoMSCMetaData[]
static const char CANUPO_PER_LEVEL_ADDITIONAL_SF_NAME[]
std::vector< ccScalarField * > * roughnessSFs
bool invalidDescriptors
cloudViewer::NormalizedProgress * nProgress
cloudViewer::DgmOctree * octree
cloudViewer::GenericIndexedCloud * corePoints
bool processCanceled
ScaleParamsComputer * computer
unsigned char octreeLevel
Set of descriptors.
std::vector< float > params