ACloudViewer  3.9.4
A Modern Library for 3D Data Processing
reconstruction.cc
Go to the documentation of this file.
1 // Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer.
9 //
10 // * Redistributions in binary form must reproduce the above copyright
11 // notice, this list of conditions and the following disclaimer in the
12 // documentation and/or other materials provided with the distribution.
13 //
14 // * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
15 // its contributors may be used to endorse or promote products derived
16 // from this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 // POSSIBILITY OF SUCH DAMAGE.
29 //
30 // Author: Johannes L. schoenberg (jsch-at-demuc-dot-de)
31 
32 #include "base/reconstruction.h"
33 
34 #include <fstream>
35 
36 #include "base/database_cache.h"
37 #include "base/gps.h"
38 #include "base/pose.h"
39 #include "base/projection.h"
40 #include "base/triangulation.h"
41 #include "util/bitmap.h"
42 #include "util/misc.h"
43 #include "util/ply.h"
44 
45 namespace colmap {
46 
// NOTE(review): the declaration lines of the default constructor (inner line
// 47), the copy constructor (inner line 50), and the copy-assignment operator
// (inner line 59) are missing from this extracted listing; the fragments below
// are their bodies. Verify against the original source file.
//
// Default construction: no correspondence graph, no 3D points added yet.
48  : correspondence_graph_(nullptr), num_added_points3D_(0) {}
49 
// Copy construction: member-wise copy through the public accessors.
51  : correspondence_graph_(other.GetCorrespondenceGraph()),
52  cameras_(other.Cameras()),
53  images_(other.Images()),
54  points3D_(other.Points3D()),
55  image_pair_stats_(other.ImagePairs()),
56  reg_image_ids_(other.RegImageIds()),
57  num_added_points3D_(other.NumAddedPoints3D()) {}
58 
// Copy assignment: self-assignment safe, member-wise copy as above.
60  if (this != &other) {
61  correspondence_graph_ = other.GetCorrespondenceGraph();
62  cameras_ = other.Cameras();
63  images_ = other.Images();
64  points3D_ = other.Points3D();
65  image_pair_stats_ = other.ImagePairs();
66  reg_image_ids_ = other.RegImageIds();
67  num_added_points3D_ = other.NumAddedPoints3D();
68  }
69  return *this;
70 }
71 
72 std::unordered_set<point3D_t> Reconstruction::Point3DIds() const {
73  std::unordered_set<point3D_t> point3D_ids;
74  point3D_ids.reserve(points3D_.size());
75 
76  for (const auto& point3D : points3D_) {
77  point3D_ids.insert(point3D.first);
78  }
79 
80  return point3D_ids;
81 }
82 
// Load cameras, images, and pairwise correspondence statistics from the
// database cache into this reconstruction. Images that already exist (e.g.
// from a model previously read from disk) are updated in place; the
// correspondence graph pointer is reset and must be re-attached via SetUp().
83 void Reconstruction::Load(const DatabaseCache& database_cache) {
84  correspondence_graph_ = nullptr;
85 
86  // Add cameras.
87  cameras_.reserve(database_cache.NumCameras());
88  for (const auto& camera : database_cache.Cameras()) {
89  if (!ExistsCamera(camera.first)) {
90  AddCamera(camera.second);
91  }
92  // Else: camera was added before, e.g. with `ReadAllCameras`.
93  }
94 
95  // Add images.
96  images_.reserve(database_cache.NumImages());
97 
98  for (const auto& image : database_cache.Images()) {
99  if (ExistsImage(image.second.ImageId())) {
100  class Image& existing_image = Image(image.second.ImageId());
// Names must agree; a mismatch indicates an inconsistent database/model.
101  CHECK_EQ(existing_image.Name(), image.second.Name());
102  if (existing_image.NumPoints2D() == 0) {
103  existing_image.SetPoints2D(image.second.Points2D());
104  } else {
105  CHECK_EQ(image.second.NumPoints2D(),
106  existing_image.NumPoints2D());
107  }
108  existing_image.SetNumObservations(image.second.NumObservations());
109  existing_image.SetNumCorrespondences(
110  image.second.NumCorrespondences());
111  } else {
112  AddImage(image.second);
113  }
114  }
115 
116  // Add image pairs.
// NOTE(review): the continuation of this loop header (inner line 118) is
// missing from this extracted listing — presumably it enumerates the graph's
// image pairs with their correspondence counts; verify against the original.
117  for (const auto& image_pair : database_cache.CorrespondenceGraph()
119  ImagePairStat image_pair_stat;
120  image_pair_stat.num_total_corrs = image_pair.second;
121  image_pair_stats_.emplace(image_pair.first, image_pair_stat);
122  }
123 }
124 
125 void Reconstruction::SetUp(const CorrespondenceGraph* correspondence_graph) {
126  CHECK_NOTNULL(correspondence_graph);
127  for (auto& image : images_) {
128  image.second.SetUp(Camera(image.second.CameraId()));
129  }
130  correspondence_graph_ = correspondence_graph;
131 
132  // If an existing model was loaded from disk and there were already images
133  // registered previously, we need to set observations as triangulated.
134  for (const auto image_id : reg_image_ids_) {
135  const class Image& image = Image(image_id);
136  for (point2D_t point2D_idx = 0; point2D_idx < image.NumPoints2D();
137  ++point2D_idx) {
138  if (image.Point2D(point2D_idx).HasPoint3D()) {
139  const bool kIsContinuedPoint3D = false;
140  SetObservationAsTriangulated(image_id, point2D_idx,
141  kIsContinuedPoint3D);
142  }
143  }
144  }
145 }
146 
// NOTE(review): the signature line of Reconstruction::TearDown (inner line
// 147) is missing from this extracted listing; the body below releases the
// correspondence graph and pair statistics, erases all images that were never
// registered, drops cameras no longer referenced by any remaining image, and
// compacts the tracks of all 3D points.
148  correspondence_graph_ = nullptr;
149  image_pair_stats_.clear();
150 
151  // Remove all not yet registered images.
152  std::unordered_set<camera_t> keep_camera_ids;
153  for (auto it = images_.begin(); it != images_.end();) {
154  if (it->second.IsRegistered()) {
155  keep_camera_ids.insert(it->second.CameraId());
156  it->second.TearDown();
157  ++it;
158  } else {
// erase() returns the next valid iterator, so the loop stays safe.
159  it = images_.erase(it);
160  }
161  }
162 
163  // Remove all unused cameras.
164  for (auto it = cameras_.begin(); it != cameras_.end();) {
165  if (keep_camera_ids.count(it->first) == 0) {
166  it = cameras_.erase(it);
167  } else {
168  ++it;
169  }
170  }
171 
172  // Compress tracks.
173  for (auto& point3D : points3D_) {
174  point3D.second.Track().Compress();
175  }
176 }
177 
178 void Reconstruction::AddCamera(const class Camera& camera) {
179  CHECK(!ExistsCamera(camera.CameraId()));
180  CHECK(camera.VerifyParams());
181  cameras_.emplace(camera.CameraId(), camera);
182 }
183 
// NOTE(review): the signature line of Reconstruction::AddImage (inner line
// 184) is missing from this extracted listing. The body inserts the image
// keyed by its identifier; the image must not exist yet.
185  CHECK(!ExistsImage(image.ImageId()));
186  images_[image.ImageId()] = image;
187 }
188 
189 point3D_t Reconstruction::AddPoint3D(const Eigen::Vector3d& xyz,
190  const Track& track,
191  const Eigen::Vector3ub& color) {
192  const point3D_t point3D_id = ++num_added_points3D_;
193  CHECK(!ExistsPoint3D(point3D_id));
194 
195  class Point3D& point3D = points3D_[point3D_id];
196 
197  point3D.SetXYZ(xyz);
198  point3D.SetTrack(track);
199  point3D.SetColor(color);
200 
201  for (const auto& track_el : track.Elements()) {
202  class Image& image = Image(track_el.image_id);
203  CHECK(!image.Point2D(track_el.point2D_idx).HasPoint3D());
204  image.SetPoint3DForPoint2D(track_el.point2D_idx, point3D_id);
205  CHECK_LE(image.NumPoints3D(), image.NumPoints2D());
206  }
207 
208  const bool kIsContinuedPoint3D = false;
209 
210  for (const auto& track_el : track.Elements()) {
211  SetObservationAsTriangulated(track_el.image_id, track_el.point2D_idx,
212  kIsContinuedPoint3D);
213  }
214 
215  return point3D_id;
216 }
217 
// NOTE(review): the signature line of Reconstruction::AddObservation (inner
// line 218) is missing from this extracted listing. The body appends the
// given track element to the track of `point3D_id`; the observation must not
// be triangulated yet, and the pair statistics are updated as a continued
// point.
219  const TrackElement& track_el) {
220  class Image& image = Image(track_el.image_id);
221  CHECK(!image.Point2D(track_el.point2D_idx).HasPoint3D());
222 
223  image.SetPoint3DForPoint2D(track_el.point2D_idx, point3D_id);
224  CHECK_LE(image.NumPoints3D(), image.NumPoints2D());
225 
226  class Point3D& point3D = Point3D(point3D_id);
227  point3D.Track().AddElement(track_el);
228 
229  const bool kIsContinuedPoint3D = true;
230  SetObservationAsTriangulated(track_el.image_id, track_el.point2D_idx,
231  kIsContinuedPoint3D);
232 }
233 
// NOTE(review): the signature line of Reconstruction::MergePoints3D (inner
// line 234) is missing from this extracted listing. The body merges two 3D
// points: position and color become the track-length weighted averages, the
// tracks are concatenated, both source points are deleted, and the id of the
// newly created merged point is returned.
235  const point3D_t point3D_id2) {
236  const class Point3D& point3D1 = Point3D(point3D_id1);
237  const class Point3D& point3D2 = Point3D(point3D_id2);
238 
// Weight each point's contribution by the length of its track.
239  const Eigen::Vector3d merged_xyz =
240  (point3D1.Track().Length() * point3D1.XYZ() +
241  point3D2.Track().Length() * point3D2.XYZ()) /
242  (point3D1.Track().Length() + point3D2.Track().Length());
243  const Eigen::Vector3d merged_rgb =
244  (point3D1.Track().Length() * point3D1.Color().cast<double>() +
245  point3D2.Track().Length() * point3D2.Color().cast<double>()) /
246  (point3D1.Track().Length() + point3D2.Track().Length());
247 
248  Track merged_track;
249  merged_track.Reserve(point3D1.Track().Length() + point3D2.Track().Length());
250  merged_track.AddElements(point3D1.Track().Elements());
251  merged_track.AddElements(point3D2.Track().Elements());
252 
// Delete before re-adding so the observations are free to be re-linked.
253  DeletePoint3D(point3D_id1);
254  DeletePoint3D(point3D_id2);
255 
256  const point3D_t merged_point3D_id =
257  AddPoint3D(merged_xyz, merged_track, merged_rgb.cast<uint8_t>());
258 
259  return merged_point3D_id;
260 }
261 
// Delete the 3D point with the given id and unlink all of its observations.
262 void Reconstruction::DeletePoint3D(const point3D_t point3D_id) {
263  // Note: Do not change order of these instructions, especially with respect
264  // to `Reconstruction::ResetTriObservations`
265 
266  const class Track& track = Point3D(point3D_id).Track();
267 
268  const bool kIsDeletedPoint3D = true;
269 
// First pass: reset the pairwise triangulation statistics while the
// observations still reference the point.
270  for (const auto& track_el : track.Elements()) {
271  ResetTriObservations(track_el.image_id, track_el.point2D_idx,
272  kIsDeletedPoint3D);
273  }
274 
// Second pass: only now detach the 2D points from the 3D point.
275  for (const auto& track_el : track.Elements()) {
276  class Image& image = Image(track_el.image_id);
277  image.ResetPoint3DForPoint2D(track_el.point2D_idx);
278  }
279 
280  points3D_.erase(point3D_id);
281 }
282 
// NOTE(review): the signature line of Reconstruction::DeleteObservation
// (inner line 283) is missing from this extracted listing. The body removes
// the observation (image_id, point2D_idx) from its 3D point; if the track
// would shrink below two elements, the whole point is deleted instead.
284  const point2D_t point2D_idx) {
285  // Note: Do not change order of these instructions, especially with respect
286  // to `Reconstruction::ResetTriObservations`
287 
288  class Image& image = Image(image_id);
289  const point3D_t point3D_id = image.Point2D(point2D_idx).Point3DId();
290  class Point3D& point3D = Point3D(point3D_id);
291 
// A track of length <= 2 would become degenerate; delete the point entirely.
292  if (point3D.Track().Length() <= 2) {
293  DeletePoint3D(point3D_id);
294  return;
295  }
296 
297  point3D.Track().DeleteElement(image_id, point2D_idx);
298 
299  const bool kIsDeletedPoint3D = false;
300  ResetTriObservations(image_id, point2D_idx, kIsDeletedPoint3D);
301 
302  image.ResetPoint3DForPoint2D(point2D_idx);
303 }
304 
// NOTE(review): the signature line of
// Reconstruction::DeleteAllPoints2DAndPoints3D (inner line 305) is missing
// from this extracted listing. The body clears all 3D points and rebuilds
// every image without its 2D points, keeping only identity, pose, prior, and
// registration state.
306  points3D_.clear();
307  for (auto& image : images_) {
// Replace the image by a stripped copy carrying no 2D points.
308  class Image new_image;
309  new_image.SetImageId(image.second.ImageId());
310  new_image.SetName(image.second.Name());
311  new_image.SetCameraId(image.second.CameraId());
312  new_image.SetRegistered(image.second.IsRegistered());
313  new_image.SetNumCorrespondences(image.second.NumCorrespondences());
314  new_image.SetQvec(image.second.Qvec());
315  new_image.SetQvecPrior(image.second.QvecPrior());
316  new_image.SetTvec(image.second.Tvec());
317  new_image.SetTvecPrior(image.second.TvecPrior());
318  image.second = new_image;
319  }
320 }
321 
// NOTE(review): the signature line of Reconstruction::RegisterImage (inner
// line 322) is missing from this extracted listing. The body marks the image
// as registered and records its id; it is a no-op for an already registered
// image, so reg_image_ids_ never contains duplicates.
323  class Image& image = Image(image_id);
324  if (!image.IsRegistered()) {
325  image.SetRegistered(true);
326  reg_image_ids_.push_back(image_id);
327  }
328 }
329 
// NOTE(review): the signature line of Reconstruction::DeRegisterImage (inner
// line 330) is missing from this extracted listing. The body deletes all
// triangulated observations of the image, clears its registered flag, and
// removes it from the registered-image list.
331  class Image& image = Image(image_id);
332 
333  for (point2D_t point2D_idx = 0; point2D_idx < image.NumPoints2D();
334  ++point2D_idx) {
335  if (image.Point2D(point2D_idx).HasPoint3D()) {
336  DeleteObservation(image_id, point2D_idx);
337  }
338  }
339 
340  image.SetRegistered(false);
341 
// Erase-remove idiom: drop all occurrences of image_id from the vector.
342  reg_image_ids_.erase(
343  std::remove(reg_image_ids_.begin(), reg_image_ids_.end(), image_id),
344  reg_image_ids_.end());
345 }
346 
// Scale and translate the reconstruction so that the robust extent (between
// the p0 and p1 coordinate quantiles) matches `extent`, centered at the
// robust centroid. Uses image projection centers when `use_images` is true,
// otherwise the 3D points.
347 void Reconstruction::Normalize(const double extent,
348  const double p0,
349  const double p1,
350  const bool use_images) {
351  CHECK_GT(extent, 0);
352 
// Nothing meaningful to normalize with fewer than two references.
353  if ((use_images && reg_image_ids_.size() < 2) ||
354  (!use_images && points3D_.size() < 2)) {
355  return;
356  }
357 
358  auto bound = ComputeBoundsAndCentroid(p0, p1, use_images);
359 
360  // Calculate scale and translation, such that
361  // translation is applied before scaling.
362  const double old_extent = (std::get<1>(bound) - std::get<0>(bound)).norm();
363  double scale;
364  if (old_extent < std::numeric_limits<double>::epsilon()) {
365  scale = 1;
366  } else {
367  scale = extent / old_extent;
368  }
369 
// NOTE(review): the line constructing the similarity transform (inner line
// 370) is missing from this extracted listing — presumably a
// SimilarityTransform3 built from `scale`, an identity rotation, and the
// translation below; verify against the original source.
371  -scale * std::get<2>(bound));
372  Transform(tform);
373 }
374 
375 Eigen::Vector3d Reconstruction::ComputeCentroid(const double p0,
376  const double p1) const {
377  return std::get<2>(ComputeBoundsAndCentroid(p0, p1, false));
378 }
379 
380 std::pair<Eigen::Vector3d, Eigen::Vector3d> Reconstruction::ComputeBoundingBox(
381  const double p0, const double p1) const {
382  auto bound = ComputeBoundsAndCentroid(p0, p1, false);
383  return std::make_pair(std::get<0>(bound), std::get<1>(bound));
384 }
385 
// Compute a robust (bounding-box min, bounding-box max, centroid) tuple over
// either the registered images' projection centers or all 3D points. The
// coordinates are sorted per axis and only the entries between the p0 and p1
// quantiles contribute, which discards outliers at both ends.
386 std::tuple<Eigen::Vector3d, Eigen::Vector3d, Eigen::Vector3d>
387 Reconstruction::ComputeBoundsAndCentroid(const double p0,
388  const double p1,
389  const bool use_images) const {
390  CHECK_GE(p0, 0);
391  CHECK_LE(p0, 1);
392  CHECK_GE(p1, 0);
393  CHECK_LE(p1, 1);
394  CHECK_LE(p0, p1);
395 
396  const size_t num_elements =
397  use_images ? reg_image_ids_.size() : points3D_.size();
398  if (num_elements == 0) {
399  return std::make_tuple(Eigen::Vector3d(0, 0, 0),
400  Eigen::Vector3d(0, 0, 0),
401  Eigen::Vector3d(0, 0, 0));
402  }
403 
404  // Coordinates of image centers or point locations.
// Stored as float to halve memory for large models; this trades a small
// amount of precision for the robust statistics below.
405  std::vector<float> coords_x;
406  std::vector<float> coords_y;
407  std::vector<float> coords_z;
408  if (use_images) {
409  coords_x.reserve(reg_image_ids_.size());
410  coords_y.reserve(reg_image_ids_.size());
411  coords_z.reserve(reg_image_ids_.size());
412  for (const image_t im_id : reg_image_ids_) {
413  const Eigen::Vector3d proj_center = Image(im_id).ProjectionCenter();
414  coords_x.push_back(static_cast<float>(proj_center(0)));
415  coords_y.push_back(static_cast<float>(proj_center(1)));
416  coords_z.push_back(static_cast<float>(proj_center(2)));
417  }
418  } else {
419  coords_x.reserve(points3D_.size());
420  coords_y.reserve(points3D_.size());
421  coords_z.reserve(points3D_.size());
422  for (const auto& point3D : points3D_) {
423  coords_x.push_back(static_cast<float>(point3D.second.X()));
424  coords_y.push_back(static_cast<float>(point3D.second.Y()));
425  coords_z.push_back(static_cast<float>(point3D.second.Z()));
426  }
427  }
428 
429  // Determine robust bounding box and mean.
430 
431  std::sort(coords_x.begin(), coords_x.end());
432  std::sort(coords_y.begin(), coords_y.end());
433  std::sort(coords_z.begin(), coords_z.end());
434 
// The quantile trimming is only applied for more than 3 elements; smaller
// sets use the full range [0, size - 1].
435  const size_t P0 = static_cast<size_t>(
436  (coords_x.size() > 3) ? p0 * (coords_x.size() - 1) : 0);
437  const size_t P1 = static_cast<size_t>((coords_x.size() > 3)
438  ? p1 * (coords_x.size() - 1)
439  : coords_x.size() - 1);
440 
441  const Eigen::Vector3d bbox_min(coords_x[P0], coords_y[P0], coords_z[P0]);
442  const Eigen::Vector3d bbox_max(coords_x[P1], coords_y[P1], coords_z[P1]);
443 
// Mean over the per-axis sorted values inside the quantile window. Note the
// axes are averaged independently of each other.
444  Eigen::Vector3d mean_coord(0, 0, 0);
445  for (size_t i = P0; i <= P1; ++i) {
446  mean_coord(0) += coords_x[i];
447  mean_coord(1) += coords_y[i];
448  mean_coord(2) += coords_z[i];
449  }
450  mean_coord /= P1 - P0 + 1;
451 
452  return std::make_tuple(bbox_min, bbox_max, mean_coord);
453 }
454 
// NOTE(review): the signature line of Reconstruction::Transform (inner line
// 455) is missing from this extracted listing — presumably it takes the
// similarity transform `tform` used below. The body applies the transform to
// every image pose and every 3D point in place.
456  for (auto& image : images_) {
457  tform.TransformPose(&image.second.Qvec(), &image.second.Tvec());
458  }
459  for (auto& point3D : points3D_) {
460  tform.TransformPoint(&point3D.second.XYZ());
461  }
462 }
463 
// NOTE(review): the signature line of Reconstruction::Crop (inner line 464)
// is missing from this extracted listing. The body returns a copy of this
// reconstruction restricted to the 3D points inside the given axis-aligned
// bounding box; only images observing such points end up registered.
465  const std::pair<Eigen::Vector3d, Eigen::Vector3d>& bbox) const {
466  // add all cameras and images. Only the registered images will be used.
467  Reconstruction reconstruction;
468  for (const auto& camera_el : cameras_) {
469  reconstruction.AddCamera(camera_el.second);
470  }
471  for (const auto& image_el : images_) {
472  reconstruction.AddImage(image_el.second);
473  auto& image = reconstruction.Image(image_el.first);
// Start from a clean slate: unregistered and with no triangulations.
474  image.SetRegistered(false);
475  for (point2D_t pid = 0; pid < image.NumPoints2D(); ++pid) {
476  image.ResetPoint3DForPoint2D(pid);
477  }
478  }
// Keep only the points whose position lies inside the (inclusive) box.
479  for (const auto& point_el : points3D_) {
480  const auto& point = point_el.second;
481  if ((point.XYZ().array() >= bbox.first.array()).all() &&
482  (point.XYZ().array() <= bbox.second.array()).all()) {
483  for (const auto& track_el : point.Track().Elements()) {
484  reconstruction.RegisterImage(track_el.image_id);
485  }
486  reconstruction.AddPoint3D(point.XYZ(), point.Track(),
487  point.Color());
488  }
489  }
490  return reconstruction;
491 }
492 
// Merge the given reconstruction into this one. A similarity alignment
// between the two models is estimated first; on failure nothing is modified
// and false is returned. Otherwise, images missing from this model are
// transformed and registered, the point clouds are fused according to the
// rules documented inline below, and the merged points are re-filtered by
// reprojection error.
493 bool Reconstruction::Merge(const Reconstruction& reconstruction,
494  const double max_reproj_error) {
495  const double kMinInlierObservations = 0.3;
496 
497  Eigen::Matrix3x4d alignment;
498  if (!ComputeAlignmentBetweenReconstructions(reconstruction, *this,
499  kMinInlierObservations,
500  max_reproj_error, &alignment)) {
501  return false;
502  }
503 
504  const SimilarityTransform3 tform(alignment);
505 
506  // Find common and missing images in the two reconstructions.
507 
508  std::unordered_set<image_t> common_image_ids;
509  common_image_ids.reserve(reconstruction.NumRegImages());
510  std::unordered_set<image_t> missing_image_ids;
511  missing_image_ids.reserve(reconstruction.NumRegImages());
512 
513  for (const auto& image_id : reconstruction.RegImageIds()) {
514  if (ExistsImage(image_id)) {
515  common_image_ids.insert(image_id);
516  } else {
517  missing_image_ids.insert(image_id);
518  }
519  }
520 
521  // Register the missing images in this reconstruction.
522 
523  for (const auto image_id : missing_image_ids) {
// Copy by value so the source reconstruction stays untouched.
524  auto reg_image = reconstruction.Image(image_id);
525  reg_image.SetRegistered(false);
526  AddImage(reg_image);
527  RegisterImage(image_id);
528  if (!ExistsCamera(reg_image.CameraId())) {
529  AddCamera(reconstruction.Camera(reg_image.CameraId()));
530  }
// Bring the copied pose into this model's coordinate frame.
531  auto& image = Image(image_id);
532  tform.TransformPose(&image.Qvec(), &image.Tvec());
533  }
534 
535  // Merge the two point clouds using the following two rules:
536  // - copy points to this reconstruction with non-conflicting tracks,
537  // i.e. points that do not have an already triangulated observation
538  // in this reconstruction.
539  // - merge tracks that are unambiguous, i.e. only merge points in the two
540  // reconstructions if they have a one-to-one mapping.
541  // Note that in both cases no cheirality or reprojection test is performed.
542 
543  for (const auto& point3D : reconstruction.Points3D()) {
544  Track new_track;
545  Track old_track;
546  std::unordered_set<point3D_t> old_point3D_ids;
547  for (const auto& track_el : point3D.second.Track().Elements()) {
548  if (common_image_ids.count(track_el.image_id) > 0) {
549  const auto& point2D =
550  Image(track_el.image_id).Point2D(track_el.point2D_idx);
551  if (point2D.HasPoint3D()) {
552  old_track.AddElement(track_el);
553  old_point3D_ids.insert(point2D.Point3DId());
554  } else {
555  new_track.AddElement(track_el);
556  }
557  } else if (missing_image_ids.count(track_el.image_id) > 0) {
558  Image(track_el.image_id)
559  .ResetPoint3DForPoint2D(track_el.point2D_idx);
560  new_track.AddElement(track_el);
561  }
562  }
563 
564  const bool create_new_point = new_track.Length() >= 2;
// Unambiguous merge: exactly one existing point is touched by the track.
565  const bool merge_new_and_old_point =
566  (new_track.Length() + old_track.Length()) >= 2 &&
567  old_point3D_ids.size() == 1;
568  if (create_new_point || merge_new_and_old_point) {
569  Eigen::Vector3d xyz = point3D.second.XYZ();
570  tform.TransformPoint(&xyz);
571  const auto point3D_id =
572  AddPoint3D(xyz, new_track, point3D.second.Color());
573  if (old_point3D_ids.size() == 1) {
574  MergePoints3D(point3D_id, *old_point3D_ids.begin());
575  }
576  }
577  }
578 
579  FilterPoints3DWithLargeReprojectionError(max_reproj_error, Point3DIds());
580 
581  return true;
582 }
583 
// NOTE(review): the signature line of Reconstruction::FindImageWithName
// (inner line 584) is missing from this extracted listing. The body does a
// linear search over all images and returns a pointer to the image with the
// given name, or nullptr if none exists.
585  const std::string& name) const {
586  for (const auto& image : images_) {
587  if (image.second.Name() == name) {
588  return &image.second;
589  }
590  }
591  return nullptr;
592 }
593 
// NOTE(review): the signature line of Reconstruction::FindCommonRegImageIds
// (inner line 594) is missing from this extracted listing. The body returns
// the ids of images registered in both this and the given reconstruction,
// checking that the names agree for shared ids.
595  const Reconstruction& reconstruction) const {
596  std::vector<image_t> common_reg_image_ids;
597  for (const auto image_id : reg_image_ids_) {
598  if (reconstruction.ExistsImage(image_id) &&
599  reconstruction.IsImageRegistered(image_id)) {
600  CHECK_EQ(Image(image_id).Name(),
601  reconstruction.Image(image_id).Name());
602  common_reg_image_ids.push_back(image_id);
603  }
604  }
605  return common_reg_image_ids;
606 }
607 
// NOTE(review): the signature line of
// Reconstruction::TranscribeImageIdsToDatabase (inner line 608) is missing
// from this extracted listing — presumably it takes the `database` used
// below. The body remaps all image identifiers (image map, registered list,
// and point tracks) to the ids used by the database, matching by image name.
// Fails fatally if any image is missing from the database.
609  std::unordered_map<image_t, image_t> old_to_new_image_ids;
610  old_to_new_image_ids.reserve(NumImages());
611 
612  std::unordered_map<image_t, class Image> new_images;
613  new_images.reserve(NumImages());
614 
615  for (auto& image : images_) {
616  if (!database.ExistsImageWithName(image.second.Name())) {
617  LOG(FATAL) << "Image with name " << image.second.Name()
618  << " does not exist in database";
619  }
620 
621  const auto database_image =
622  database.ReadImageWithName(image.second.Name());
623  old_to_new_image_ids.emplace(image.second.ImageId(),
624  database_image.ImageId());
625  image.second.SetImageId(database_image.ImageId());
626  new_images.emplace(database_image.ImageId(), image.second);
627  }
628 
629  images_ = std::move(new_images);
630 
// Rewrite the registered-image list and all track elements with the new ids.
631  for (auto& image_id : reg_image_ids_) {
632  image_id = old_to_new_image_ids.at(image_id);
633  }
634 
635  for (auto& point3D : points3D_) {
636  for (auto& track_el : point3D.second.Track().Elements()) {
637  track_el.image_id = old_to_new_image_ids.at(track_el.image_id);
638  }
639  }
640 }
641 
// NOTE(review): the signature line of Reconstruction::FilterPoints3D (inner
// line 642) is missing from this extracted listing. The body filters the
// given 3D points, first by maximum reprojection error and then by minimum
// triangulation angle, and returns the total number of filtered
// observations. The order matters: see FilterAllPoints3D for the rationale.
643  const double max_reproj_error,
644  const double min_tri_angle,
645  const std::unordered_set<point3D_t>& point3D_ids) {
646  size_t num_filtered = 0;
647  num_filtered += FilterPoints3DWithLargeReprojectionError(max_reproj_error,
648  point3D_ids);
649  num_filtered += FilterPoints3DWithSmallTriangulationAngle(min_tri_angle,
650  point3D_ids);
651  return num_filtered;
652 }
653 
// NOTE(review): the signature line of Reconstruction::FilterPoints3DInImages
// (inner line 654) is missing from this extracted listing. The body collects
// every 3D point observed by the given images and delegates the filtering to
// FilterPoints3D; returns the number of filtered observations.
655  const double max_reproj_error,
656  const double min_tri_angle,
657  const std::unordered_set<image_t>& image_ids) {
658  std::unordered_set<point3D_t> point3D_ids;
659  for (const image_t image_id : image_ids) {
660  const class Image& image = Image(image_id);
661  for (const Point2D& point2D : image.Points2D()) {
662  if (point2D.HasPoint3D()) {
663  point3D_ids.insert(point2D.Point3DId());
664  }
665  }
666  }
667  return FilterPoints3D(max_reproj_error, min_tri_angle, point3D_ids);
668 }
669 
670 size_t Reconstruction::FilterAllPoints3D(const double max_reproj_error,
671  const double min_tri_angle) {
672  // Important: First filter observations and points with large reprojection
673  // error, so that observations with large reprojection error do not make
674  // a point stable through a large triangulation angle.
675  const std::unordered_set<point3D_t>& point3D_ids = Point3DIds();
676  size_t num_filtered = 0;
677  num_filtered += FilterPoints3DWithLargeReprojectionError(max_reproj_error,
678  point3D_ids);
679  num_filtered += FilterPoints3DWithSmallTriangulationAngle(min_tri_angle,
680  point3D_ids);
681  return num_filtered;
682 }
683 
// NOTE(review): the signature line of
// Reconstruction::FilterObservationsWithNegativeDepth (inner line 684) is
// missing from this extracted listing. The body deletes every observation
// whose 3D point has non-positive depth in its observing image and returns
// the number of deleted observations.
685  size_t num_filtered = 0;
686  for (const auto image_id : reg_image_ids_) {
687  const class Image& image = Image(image_id);
// Hoist the projection matrix out of the per-point loop.
688  const Eigen::Matrix3x4d proj_matrix = image.ProjectionMatrix();
689  for (point2D_t point2D_idx = 0; point2D_idx < image.NumPoints2D();
690  ++point2D_idx) {
691  const Point2D& point2D = image.Point2D(point2D_idx);
692  if (point2D.HasPoint3D()) {
693  const class Point3D& point3D = Point3D(point2D.Point3DId());
694  if (!HasPointPositiveDepth(proj_matrix, point3D.XYZ())) {
695  DeleteObservation(image_id, point2D_idx);
696  num_filtered += 1;
697  }
698  }
699  }
700  }
701  return num_filtered;
702 }
703 
704 std::vector<image_t> Reconstruction::FilterImages(
705  const double min_focal_length_ratio,
706  const double max_focal_length_ratio,
707  const double max_extra_param) {
708  std::vector<image_t> filtered_image_ids;
709  for (const image_t image_id : RegImageIds()) {
710  const class Image& image = Image(image_id);
711  const class Camera& camera = Camera(image.CameraId());
712  if (image.NumPoints3D() == 0) {
713  filtered_image_ids.push_back(image_id);
714  } else if (camera.HasBogusParams(min_focal_length_ratio,
715  max_focal_length_ratio,
716  max_extra_param)) {
717  filtered_image_ids.push_back(image_id);
718  }
719  }
720 
721  // Only de-register after iterating over reg_image_ids_ to avoid
722  // simultaneous iteration and modification of the vector.
723  for (const image_t image_id : filtered_image_ids) {
724  DeRegisterImage(image_id);
725  }
726 
727  return filtered_image_ids;
728 }
729 
// NOTE(review): the signature line of Reconstruction::ComputeNumObservations
// (inner line 730) is missing from this extracted listing. The body sums the
// triangulated 2D points over all registered images.
731  size_t num_obs = 0;
732  for (const image_t image_id : reg_image_ids_) {
733  num_obs += Image(image_id).NumPoints3D();
734  }
735  return num_obs;
736 }
737 
// NOTE(review): the signature line of Reconstruction::ComputeMeanTrackLength
// (inner line 738) is missing from this extracted listing. The body returns
// the number of observations divided by the number of 3D points, or 0.0 for
// an empty model.
739  if (points3D_.empty()) {
740  return 0.0;
741  } else {
742  return ComputeNumObservations() / static_cast<double>(points3D_.size());
743  }
744 }
745 
// NOTE(review): the signature line of
// Reconstruction::ComputeMeanObservationsPerRegImage (inner line 746) is
// missing from this extracted listing. The body returns the number of
// observations divided by the number of registered images, or 0.0 if none.
747  if (reg_image_ids_.empty()) {
748  return 0.0;
749  } else {
750  return ComputeNumObservations() /
751  static_cast<double>(reg_image_ids_.size());
752  }
753 }
754 
// NOTE(review): the signature line of
// Reconstruction::ComputeMeanReprojectionError (inner line 755) is missing
// from this extracted listing. The body averages the cached reprojection
// error over all 3D points that have one; returns 0.0 if no point carries an
// error value.
756  double error_sum = 0.0;
757  size_t num_valid_errors = 0;
758  for (const auto& point3D : points3D_) {
759  if (point3D.second.HasError()) {
760  error_sum += point3D.second.Error();
761  num_valid_errors += 1;
762  }
763  }
764 
765  if (num_valid_errors == 0) {
766  return 0.0;
767  } else {
768  return error_sum / num_valid_errors;
769  }
770 }
771 
772 void Reconstruction::Read(const std::string& path) {
773  if (ExistsFile(JoinPaths(path, "cameras.bin")) &&
774  ExistsFile(JoinPaths(path, "images.bin")) &&
775  ExistsFile(JoinPaths(path, "points3D.bin"))) {
776  ReadBinary(path);
777  } else if (ExistsFile(JoinPaths(path, "cameras.txt")) &&
778  ExistsFile(JoinPaths(path, "images.txt")) &&
779  ExistsFile(JoinPaths(path, "points3D.txt"))) {
780  ReadText(path);
781  } else {
782  LOG(FATAL) << "cameras, images, points3D files do not exist at "
783  << path;
784  }
785 }
786 
// Write the reconstruction to `path` in the (default) binary format.
787 void Reconstruction::Write(const std::string& path) const { WriteBinary(path); }
788 
// Read cameras, images, and points from the text files in `path`.
789 void Reconstruction::ReadText(const std::string& path) {
790  ReadCamerasText(JoinPaths(path, "cameras.txt"));
791  ReadImagesText(JoinPaths(path, "images.txt"));
792  ReadPoints3DText(JoinPaths(path, "points3D.txt"));
793 }
794 
// Read cameras, images, and points from the binary files in `path`.
795 void Reconstruction::ReadBinary(const std::string& path) {
796  ReadCamerasBinary(JoinPaths(path, "cameras.bin"));
797  ReadImagesBinary(JoinPaths(path, "images.bin"));
798  ReadPoints3DBinary(JoinPaths(path, "points3D.bin"));
799 }
800 
// Write cameras, images, and points as text files into `path`.
801 void Reconstruction::WriteText(const std::string& path) const {
802  WriteCamerasText(JoinPaths(path, "cameras.txt"));
803  WriteImagesText(JoinPaths(path, "images.txt"));
804  WritePoints3DText(JoinPaths(path, "points3D.txt"));
805 }
806 
// Write cameras, images, and points as binary files into `path`.
807 void Reconstruction::WriteBinary(const std::string& path) const {
808  WriteCamerasBinary(JoinPaths(path, "cameras.bin"));
809  WriteImagesBinary(JoinPaths(path, "images.bin"));
810  WritePoints3DBinary(JoinPaths(path, "points3D.bin"));
811 }
812 
813 std::vector<PlyPoint> Reconstruction::ConvertToPLY() const {
814  std::vector<PlyPoint> ply_points;
815  ply_points.reserve(points3D_.size());
816 
817  for (const auto& point3D : points3D_) {
818  PlyPoint ply_point;
819  ply_point.x = point3D.second.X();
820  ply_point.y = point3D.second.Y();
821  ply_point.z = point3D.second.Z();
822  ply_point.r = point3D.second.Color(0);
823  ply_point.g = point3D.second.Color(1);
824  ply_point.b = point3D.second.Color(2);
825  ply_points.push_back(ply_point);
826  }
827 
828  return ply_points;
829 }
830 
// Replace all 3D points with the points read from the PLY file at `path`.
// Each imported point gets an empty track.
831 void Reconstruction::ImportPLY(const std::string& path) {
832  points3D_.clear();
833 
834  const auto ply_points = ReadPly(path);
835 
836  points3D_.reserve(ply_points.size());
837 
838  for (const auto& ply_point : ply_points) {
839  AddPoint3D(Eigen::Vector3d(ply_point.x, ply_point.y, ply_point.z),
840  Track(),
841  Eigen::Vector3ub(ply_point.r, ply_point.g, ply_point.b));
842  }
843 }
844 
845 void Reconstruction::ImportPLY(const std::vector<PlyPoint>& ply_points) {
846  points3D_.clear();
847  points3D_.reserve(ply_points.size());
848  for (const auto& ply_point : ply_points) {
849  AddPoint3D(Eigen::Vector3d(ply_point.x, ply_point.y, ply_point.z),
850  Track(),
851  Eigen::Vector3ub(ply_point.r, ply_point.g, ply_point.b));
852  }
853 }
854 
// Export the reconstruction in NVM (VisualSfM) format. Only SIMPLE_RADIAL
// and pinhole-like camera models are supported; returns false (after a
// warning) for any other model. When `skip_distortion` is set, the radial
// coefficient is written as zero regardless of the camera model.
855 bool Reconstruction::ExportNVM(const std::string& path,
856  bool skip_distortion) const {
857  std::ofstream file(path, std::ios::trunc);
858  CHECK(file.is_open()) << path;
859 
860  // Ensure that we don't lose any precision by storing in text.
861  file.precision(17);
862 
863  // White space added for compatibility with Meshlab.
864  file << "NVM_V3 " << std::endl << " " << std::endl;
865  file << reg_image_ids_.size() << " " << std::endl;
866 
867  std::unordered_map<image_t, size_t> image_id_to_idx_;
868  size_t image_idx = 0;
869 
870  for (const auto image_id : reg_image_ids_) {
871  const class Image& image = Image(image_id);
872  const class Camera& camera = Camera(image.CameraId());
873 
874  double k;
// NOTE(review): the remainder of this condition (inner lines 876-877) is
// missing from this extracted listing — presumably it tests for the
// pinhole-type camera models; verify against the original source.
875  if (skip_distortion ||
878  k = 0.0;
879  } else if (camera.ModelId() == SimpleRadialCameraModel::model_id) {
// NOTE(review): the continuation of this expression (inner line 881) is
// missing — presumably it reads the camera's radial distortion parameter.
880  k = -1 *
882  } else {
883  std::cout << "WARNING: NVM only supports `SIMPLE_RADIAL` "
884  "and pinhole camera models."
885  << std::endl;
886  return false;
887  }
888 
889  const Eigen::Vector3d proj_center = image.ProjectionCenter();
890 
// One camera line: name, focal, quaternion, camera center, k, and a 0.
891  file << image.Name() << " ";
892  file << camera.MeanFocalLength() << " ";
893  file << image.Qvec(0) << " ";
894  file << image.Qvec(1) << " ";
895  file << image.Qvec(2) << " ";
896  file << image.Qvec(3) << " ";
897  file << proj_center(0) << " ";
898  file << proj_center(1) << " ";
899  file << proj_center(2) << " ";
900  file << k << " ";
901  file << 0 << std::endl;
902 
903  image_id_to_idx_[image_id] = image_idx;
904  image_idx += 1;
905  }
906 
907  file << std::endl << points3D_.size() << std::endl;
908 
909  for (const auto& point3D : points3D_) {
910  file << point3D.second.XYZ()(0) << " ";
911  file << point3D.second.XYZ()(1) << " ";
912  file << point3D.second.XYZ()(2) << " ";
913  file << static_cast<int>(point3D.second.Color(0)) << " ";
914  file << static_cast<int>(point3D.second.Color(1)) << " ";
915  file << static_cast<int>(point3D.second.Color(2)) << " ";
916 
917  std::ostringstream line;
918 
919  std::unordered_set<image_t> image_ids;
920  for (const auto& track_el : point3D.second.Track().Elements()) {
921  // Make sure that each point only has a single observation per
922  // image, since VisualSfM does not support with multiple
923  // observations.
924  if (image_ids.count(track_el.image_id) == 0) {
925  const class Image& image = Image(track_el.image_id);
926  const Point2D& point2D = image.Point2D(track_el.point2D_idx);
927  line << image_id_to_idx_[track_el.image_id] << " ";
928  line << track_el.point2D_idx << " ";
929  line << point2D.X() << " ";
930  line << point2D.Y() << " ";
931  image_ids.insert(track_el.image_id);
932  }
933  }
934 
// Drop the trailing space before writing the track line.
935  std::string line_string = line.str();
936  line_string = line_string.substr(0, line_string.size() - 1);
937 
938  file << image_ids.size() << " ";
939  file << line_string << std::endl;
940  }
941 
942  return true;
943 }
944 
// Export one `.cam` file per registered image into `path`. Only
// SIMPLE_RADIAL, RADIAL, and pinhole-like camera models are supported;
// returns false (after a warning) for any other model.
945 bool Reconstruction::ExportCam(const std::string& path,
946  bool skip_distortion) const {
// NOTE(review): inner line 947 is missing from this extracted listing —
// presumably it ensures the output directory exists; verify against the
// original source.
948  for (const auto image_id : reg_image_ids_) {
949  std::string name, ext;
950  const class Image& image = Image(image_id);
951  const class Camera& camera = Camera(image.CameraId());
952 
953  SplitFileExtension(image.Name(), &name, &ext);
954  name = JoinPaths(path, name + ".cam");
955  std::ofstream file(name, std::ios::trunc);
956 
957  CHECK(file.is_open()) << name;
958 
959  // Ensure that we don't lose any precision by storing in text.
960  file.precision(17);
961 
962  double k1, k2;
// NOTE(review): the remainder of this condition (inner lines 964-965) is
// missing — presumably it tests for the pinhole-type camera models.
963  if (skip_distortion ||
966  k1 = 0.0;
967  k2 = 0.0;
// NOTE(review): inner line 969 is missing — presumably it assigns k1 from
// the camera's radial distortion parameter.
968  } else if (camera.ModelId() == SimpleRadialCameraModel::model_id) {
970  k2 = 0.0;
// NOTE(review): inner lines 972-973 are missing — presumably they assign k1
// and k2 from the camera's two radial distortion parameters.
971  } else if (camera.ModelId() == RadialCameraModel::model_id) {
974  } else {
975  std::cout
976  << "WARNING: CAM only supports `SIMPLE_RADIAL`, `RADIAL`, "
977  "and pinhole camera models."
978  << std::endl;
979  return false;
980  }
981 
982  // If both k1 and k2 values are non-zero, then the CAM format assumes
983  // a Bundler-like radial distortion model, which converts well from
984  // COLMAP. However, if k2 is zero, then a different model is used
985  // that does not translate as well, so we avoid setting k2 to zero.
986  if (k1 != 0.0 && k2 == 0.0) {
987  k2 = 1e-10;
988  }
989 
990  double fx, fy;
991  if (camera.FocalLengthIdxs().size() == 2) {
992  fx = camera.FocalLengthX();
993  fy = camera.FocalLengthY();
994  } else {
995  fx = fy = camera.MeanFocalLength();
996  }
997 
// Normalize the focal length by the limiting image dimension.
998  double focal_length;
999  if (camera.Width() * fy < camera.Height() * fx) {
1000  focal_length = fy / camera.Height();
1001  } else {
1002  focal_length = fx / camera.Width();
1003  }
1004 
1005  const Eigen::Matrix3d rot_mat = image.RotationMatrix();
// Line 1: translation and row-major rotation. Line 2: normalized focal,
// distortion, aspect ratio, and normalized principal point.
1006  file << image.Tvec(0) << " " << image.Tvec(1) << " " << image.Tvec(2)
1007  << " " << rot_mat(0, 0) << " " << rot_mat(0, 1) << " "
1008  << rot_mat(0, 2) << " " << rot_mat(1, 0) << " " << rot_mat(1, 1)
1009  << " " << rot_mat(1, 2) << " " << rot_mat(2, 0) << " "
1010  << rot_mat(2, 1) << " " << rot_mat(2, 2) << std::endl;
1011  file << focal_length << " " << k1 << " " << k2 << " " << fy / fx << " "
1012  << camera.PrincipalPointX() / camera.Width() << " "
1013  << camera.PrincipalPointY() / camera.Height() << std::endl;
1014  }
1015 
1016  return true;
1017 }
1018 
1019 bool Reconstruction::ExportRecon3D(const std::string& path,
1020  bool skip_distortion) const {
1021  std::string base_path = EnsureTrailingSlash(StringReplace(path, "\\", "/"));
1022  CreateDirIfNotExists(base_path);
1023  base_path = base_path.append("Recon/");
1024  CreateDirIfNotExists(base_path);
1025  std::string synth_path = base_path + "synth_0.out";
1026  std::string image_list_path = base_path + "urd-images.txt";
1027  std::string image_map_path = base_path + "imagemap_0.txt";
1028 
1029  std::ofstream synth_file(synth_path, std::ios::trunc);
1030  CHECK(synth_file.is_open()) << synth_path;
1031  std::ofstream image_list_file(image_list_path, std::ios::trunc);
1032  CHECK(image_list_file.is_open()) << image_list_path;
1033  std::ofstream image_map_file(image_map_path, std::ios::trunc);
1034  CHECK(image_map_file.is_open()) << image_map_path;
1035 
1036  // Ensure that we don't lose any precision by storing in text.
1037  synth_file.precision(17);
1038 
1039  // Write header info
1040  synth_file << "colmap 1.0" << std::endl;
1041  synth_file << reg_image_ids_.size() << " " << points3D_.size() << std::endl;
1042 
1043  std::unordered_map<image_t, size_t> image_id_to_idx_;
1044  size_t image_idx = 0;
1045 
1046  // Write image/camera info
1047  for (const auto image_id : reg_image_ids_) {
1048  const class Image& image = Image(image_id);
1049  const class Camera& camera = Camera(image.CameraId());
1050 
1051  double k1, k2;
1052  if (skip_distortion ||
1054  camera.ModelId() == PinholeCameraModel::model_id) {
1055  k1 = 0.0;
1056  k2 = 0.0;
1057  } else if (camera.ModelId() == SimpleRadialCameraModel::model_id) {
1058  k1 = -1 *
1060  k2 = 0.0;
1061  } else if (camera.ModelId() == RadialCameraModel::model_id) {
1062  k1 = -1 * camera.Params(RadialCameraModel::extra_params_idxs[0]);
1063  k2 = -1 * camera.Params(RadialCameraModel::extra_params_idxs[1]);
1064  } else {
1065  std::cout << "WARNING: Recon3D only supports `SIMPLE_RADIAL`, "
1066  "`RADIAL`, and pinhole camera models."
1067  << std::endl;
1068  return false;
1069  }
1070 
1071  const double scale =
1072  1.0 / (double)std::max(camera.Width(), camera.Height());
1073  synth_file << scale * camera.MeanFocalLength() << " " << k1 << " " << k2
1074  << std::endl;
1075  synth_file << QuaternionToRotationMatrix(
1076  NormalizeQuaternion(image.Qvec()))
1077  << std::endl;
1078  synth_file << image.Tvec(0) << " " << image.Tvec(1) << " "
1079  << image.Tvec(2) << std::endl;
1080 
1081  image_id_to_idx_[image_id] = image_idx;
1082  image_list_file << image.Name() << std::endl
1083  << camera.Width() << " " << camera.Height()
1084  << std::endl;
1085  image_map_file << image_idx << std::endl;
1086 
1087  image_idx += 1;
1088  }
1089  image_list_file.close();
1090  image_map_file.close();
1091 
1092  // Write point info
1093  for (const auto& point3D : points3D_) {
1094  auto& p = point3D.second;
1095  synth_file << p.XYZ()(0) << " " << p.XYZ()(1) << " " << p.XYZ()(2)
1096  << std::endl;
1097  synth_file << (int)p.Color(0) << " " << (int)p.Color(1) << " "
1098  << (int)p.Color(2) << std::endl;
1099 
1100  std::ostringstream line;
1101 
1102  std::unordered_set<image_t> image_ids;
1103  for (const auto& track_el : p.Track().Elements()) {
1104  // Make sure that each point only has a single observation per
1105  // image, since VisualSfM does not support with multiple
1106  // observations.
1107  if (image_ids.count(track_el.image_id) == 0) {
1108  const class Image& image = Image(track_el.image_id);
1109  const class Camera& camera = Camera(image.CameraId());
1110  const Point2D& point2D = image.Point2D(track_el.point2D_idx);
1111 
1112  const double scale =
1113  1.0 / (double)std::max(camera.Width(), camera.Height());
1114 
1115  line << image_id_to_idx_[track_el.image_id] << " ";
1116  line << track_el.point2D_idx << " ";
1117  // Use a scale of -1.0 to mark as invalid as it is not needed
1118  // currently
1119  line << "-1.0 ";
1120  line << (point2D.X() - camera.PrincipalPointX()) * scale << " ";
1121  line << (point2D.Y() - camera.PrincipalPointY()) * scale << " ";
1122  image_ids.insert(track_el.image_id);
1123  }
1124  }
1125 
1126  std::string line_string = line.str();
1127  line_string = line_string.substr(0, line_string.size() - 1);
1128 
1129  synth_file << image_ids.size() << " ";
1130  synth_file << line_string << std::endl;
1131  }
1132  synth_file.close();
1133 
1134  return true;
1135 }
1136 
1137 bool Reconstruction::ExportBundler(const std::string& path,
1138  const std::string& list_path,
1139  bool skip_distortion) const {
1140  std::ofstream file(path, std::ios::trunc);
1141  CHECK(file.is_open()) << path;
1142 
1143  std::ofstream list_file(list_path, std::ios::trunc);
1144  CHECK(list_file.is_open()) << list_path;
1145 
1146  // Ensure that we don't lose any precision by storing in text.
1147  file.precision(17);
1148 
1149  file << "# Bundle file v0.3" << std::endl;
1150 
1151  file << reg_image_ids_.size() << " " << points3D_.size() << std::endl;
1152 
1153  std::unordered_map<image_t, size_t> image_id_to_idx_;
1154  size_t image_idx = 0;
1155 
1156  for (const image_t image_id : reg_image_ids_) {
1157  const class Image& image = Image(image_id);
1158  const class Camera& camera = Camera(image.CameraId());
1159 
1160  double k1, k2;
1161  if (skip_distortion ||
1163  camera.ModelId() == PinholeCameraModel::model_id) {
1164  k1 = 0.0;
1165  k2 = 0.0;
1166  } else if (camera.ModelId() == SimpleRadialCameraModel::model_id) {
1168  k2 = 0.0;
1169  } else if (camera.ModelId() == RadialCameraModel::model_id) {
1172  } else {
1173  std::cout << "WARNING: Bundler only supports `SIMPLE_RADIAL`, "
1174  "`RADIAL`, and pinhole camera models."
1175  << std::endl;
1176  return false;
1177  }
1178 
1179  file << camera.MeanFocalLength() << " " << k1 << " " << k2 << std::endl;
1180 
1181  const Eigen::Matrix3d R = image.RotationMatrix();
1182  file << R(0, 0) << " " << R(0, 1) << " " << R(0, 2) << std::endl;
1183  file << -R(1, 0) << " " << -R(1, 1) << " " << -R(1, 2) << std::endl;
1184  file << -R(2, 0) << " " << -R(2, 1) << " " << -R(2, 2) << std::endl;
1185 
1186  file << image.Tvec(0) << " ";
1187  file << -image.Tvec(1) << " ";
1188  file << -image.Tvec(2) << std::endl;
1189 
1190  list_file << image.Name() << std::endl;
1191 
1192  image_id_to_idx_[image_id] = image_idx;
1193  image_idx += 1;
1194  }
1195 
1196  for (const auto& point3D : points3D_) {
1197  file << point3D.second.XYZ()(0) << " ";
1198  file << point3D.second.XYZ()(1) << " ";
1199  file << point3D.second.XYZ()(2) << std::endl;
1200 
1201  file << static_cast<int>(point3D.second.Color(0)) << " ";
1202  file << static_cast<int>(point3D.second.Color(1)) << " ";
1203  file << static_cast<int>(point3D.second.Color(2)) << std::endl;
1204 
1205  std::ostringstream line;
1206 
1207  line << point3D.second.Track().Length() << " ";
1208 
1209  for (const auto& track_el : point3D.second.Track().Elements()) {
1210  const class Image& image = Image(track_el.image_id);
1211  const class Camera& camera = Camera(image.CameraId());
1212 
1213  // Bundler output assumes image coordinate system origin
1214  // in the lower left corner of the image with the center of
1215  // the lower left pixel being (0, 0). Our coordinate system
1216  // starts in the upper left corner with the center of the
1217  // upper left pixel being (0.5, 0.5).
1218 
1219  const Point2D& point2D = image.Point2D(track_el.point2D_idx);
1220 
1221  line << image_id_to_idx_.at(track_el.image_id) << " ";
1222  line << track_el.point2D_idx << " ";
1223  line << point2D.X() - camera.PrincipalPointX() << " ";
1224  line << camera.PrincipalPointY() - point2D.Y() << " ";
1225  }
1226 
1227  std::string line_string = line.str();
1228  line_string = line_string.substr(0, line_string.size() - 1);
1229 
1230  file << line_string << std::endl;
1231  }
1232 
1233  return true;
1234 }
1235 
1236 void Reconstruction::ExportPLY(const std::string& path) const {
1237  const auto ply_points = ConvertToPLY();
1238 
1239  const bool kWriteNormal = false;
1240  const bool kWriteRGB = true;
1241  WriteBinaryPlyPoints(path, ply_points, kWriteNormal, kWriteRGB);
1242 }
1243 
// Writes two VRML 2.0 files: `images_path` receives one frustum shape per
// registered image (scaled by `image_scale`, colored with `image_rgb`),
// and `points3D_path` receives the sparse point cloud as a colored
// PointSet.
1244 void Reconstruction::ExportVRML(const std::string& images_path,
1245  const std::string& points3D_path,
1246  const double image_scale,
1247  const Eigen::Vector3d& image_rgb) const {
1248  std::ofstream images_file(images_path, std::ios::trunc);
1249  CHECK(images_file.is_open()) << images_path;
1250 
1251  const double six = image_scale * 0.15;
1252  const double siy = image_scale * 0.1;
1253 
// Frustum template in camera-local coordinates: four image-plane corners,
// the projection center (index 4), and four inner points used by the
// side triangles in the coordIndex list below.
1254  std::vector<Eigen::Vector3d> points;
1255  points.emplace_back(-six, -siy, six * 1.0 * 2.0);
1256  points.emplace_back(+six, -siy, six * 1.0 * 2.0);
1257  points.emplace_back(+six, +siy, six * 1.0 * 2.0);
1258  points.emplace_back(-six, +siy, six * 1.0 * 2.0);
1259  points.emplace_back(0, 0, 0);
1260  points.emplace_back(-six / 3.0, -siy / 3.0, six * 1.0 * 2.0);
1261  points.emplace_back(+six / 3.0, -siy / 3.0, six * 1.0 * 2.0);
1262  points.emplace_back(+six / 3.0, +siy / 3.0, six * 1.0 * 2.0);
1263  points.emplace_back(-six / 3.0, +siy / 3.0, six * 1.0 * 2.0);
1264 
// Only registered images contribute a frustum shape.
1265  for (const auto& image : images_) {
1266  if (!image.second.IsRegistered()) {
1267  continue;
1268  }
1269 
1270  images_file << "Shape{\n";
1271  images_file << " appearance Appearance {\n";
1272  images_file << " material DEF Default-ffRffGffB Material {\n";
1273  images_file << " ambientIntensity 0\n";
1274  images_file << " diffuseColor "
1275  << " " << image_rgb(0) << " " << image_rgb(1) << " "
1276  << image_rgb(2) << "\n";
1277  images_file << " emissiveColor 0.1 0.1 0.1 } }\n";
1278  images_file << " geometry IndexedFaceSet {\n";
1279  images_file << " solid FALSE \n";
1280  images_file << " colorPerVertex TRUE \n";
1281  images_file << " ccw TRUE \n";
1282 
1283  images_file << " coord Coordinate {\n";
1284  images_file << " point [\n";
1285 
// The inverse projection matrix maps the camera-local template points
// into world coordinates at this image's pose.
1286  Eigen::Transform<double, 3, Eigen::Affine> transform;
1287  transform.matrix().topLeftCorner<3, 4>() =
1288  image.second.InverseProjectionMatrix();
1289 
1290  // Move camera base model to camera pose.
1291  for (size_t i = 0; i < points.size(); i++) {
1292  const Eigen::Vector3d point = transform * points[i];
1293  images_file << point(0) << " " << point(1) << " " << point(2)
1294  << "\n";
1295  }
1296 
1297  images_file << " ] }\n";
1298 
// Every frustum vertex gets the same caller-provided color.
1299  images_file << "color Color {color [\n";
1300  for (size_t p = 0; p < points.size(); p++) {
1301  images_file << " " << image_rgb(0) << " " << image_rgb(1) << " "
1302  << image_rgb(2) << "\n";
1303  }
1304 
1305  images_file << "\n] }\n";
1306 
// Face indices (-1 terminates a face): the image-plane quad followed by
// four triangles connecting the inner points to the projection center.
1307  images_file << "coordIndex [\n";
1308  images_file << " 0, 1, 2, 3, -1\n";
1309  images_file << " 5, 6, 4, -1\n";
1310  images_file << " 6, 7, 4, -1\n";
1311  images_file << " 7, 8, 4, -1\n";
1312  images_file << " 8, 5, 4, -1\n";
1313  images_file << " \n] \n";
1314 
1315  images_file << " texCoord TextureCoordinate { point [\n";
1316  images_file << " 1 1,\n";
1317  images_file << " 0 1,\n";
1318  images_file << " 0 0,\n";
1319  images_file << " 1 0,\n";
1320  images_file << " 0 0,\n";
1321  images_file << " 0 0,\n";
1322  images_file << " 0 0,\n";
1323  images_file << " 0 0,\n";
1324  images_file << " 0 0,\n";
1325 
1326  images_file << " ] }\n";
1327  images_file << "} }\n";
1328  }
1329 
1330  // Write 3D points
1331 
1332  std::ofstream points3D_file(points3D_path, std::ios::trunc);
1333  CHECK(points3D_file.is_open()) << points3D_path;
1334 
1335  points3D_file << "#VRML V2.0 utf8\n";
1336  points3D_file << "Background { skyColor [1.0 1.0 1.0] } \n";
1337  points3D_file << "Shape{ appearance Appearance {\n";
1338  points3D_file << " material Material {emissiveColor 1 1 1} }\n";
1339  points3D_file << " geometry PointSet {\n";
1340  points3D_file << " coord Coordinate {\n";
1341  points3D_file << " point [\n";
1342 
1343  for (const auto& point3D : points3D_) {
1344  points3D_file << point3D.second.XYZ()(0) << ", ";
1345  points3D_file << point3D.second.XYZ()(1) << ", ";
1346  points3D_file << point3D.second.XYZ()(2) << std::endl;
1347  }
1348 
1349  points3D_file << " ] }\n";
1350  points3D_file << " color Color { color [\n";
1351 
// Point colors are stored as uint8 and scaled to floats for VRML.
1352  for (const auto& point3D : points3D_) {
1353  points3D_file << point3D.second.Color(0) / 255.0 << ", ";
1354  points3D_file << point3D.second.Color(1) / 255.0 << ", ";
1355  points3D_file << point3D.second.Color(2) / 255.0 << std::endl;
1356  }
1357 
1358  points3D_file << " ] } } }\n";
1359 }
1360 
1362  const std::string& path) {
1363  const class Image& image = Image(image_id);
1364 
1365  Bitmap bitmap;
1366  if (!bitmap.Read(JoinPaths(path, image.Name()))) {
1367  return false;
1368  }
1369 
1370  const Eigen::Vector3ub kBlackColor(0, 0, 0);
1371  for (const Point2D& point2D : image.Points2D()) {
1372  if (point2D.HasPoint3D()) {
1373  class Point3D& point3D = Point3D(point2D.Point3DId());
1374  if (point3D.Color() == kBlackColor) {
1376  // COLMAP assumes that the upper left pixel center is (0.5,
1377  // 0.5).
1378  if (bitmap.InterpolateBilinear(point2D.X() - 0.5,
1379  point2D.Y() - 0.5, &color)) {
1380  const BitmapColor<uint8_t> color_ub = color.Cast<uint8_t>();
1381  point3D.SetColor(Eigen::Vector3ub(color_ub.r, color_ub.g,
1382  color_ub.b));
1383  }
1384  }
1385  }
1386  }
1387 
1388  return true;
1389 }
1390 
1392  std::unordered_map<point3D_t, Eigen::Vector3d> color_sums;
1393  std::unordered_map<point3D_t, size_t> color_counts;
1394 
1395  for (size_t i = 0; i < reg_image_ids_.size(); ++i) {
1396  const class Image& image = Image(reg_image_ids_[i]);
1397  const std::string image_path = JoinPaths(path, image.Name());
1398 
1399  Bitmap bitmap;
1400  if (!bitmap.Read(image_path)) {
1401  std::cout << StringPrintf("Could not read image %s at path %s.",
1402  image.Name().c_str(), image_path.c_str())
1403  << std::endl;
1404  continue;
1405  }
1406 
1407  for (const Point2D& point2D : image.Points2D()) {
1408  if (point2D.HasPoint3D()) {
1410  // COLMAP assumes that the upper left pixel center is (0.5,
1411  // 0.5).
1412  if (bitmap.InterpolateBilinear(point2D.X() - 0.5,
1413  point2D.Y() - 0.5, &color)) {
1414  if (color_sums.count(point2D.Point3DId())) {
1415  Eigen::Vector3d& color_sum =
1416  color_sums[point2D.Point3DId()];
1417  color_sum(0) += color.r;
1418  color_sum(1) += color.g;
1419  color_sum(2) += color.b;
1420  color_counts[point2D.Point3DId()] += 1;
1421  } else {
1422  color_sums.emplace(
1423  point2D.Point3DId(),
1424  Eigen::Vector3d(color.r, color.g, color.b));
1425  color_counts.emplace(point2D.Point3DId(), 1);
1426  }
1427  }
1428  }
1429  }
1430  }
1431 
1432  const Eigen::Vector3ub kBlackColor = Eigen::Vector3ub::Zero();
1433  for (auto& point3D : points3D_) {
1434  if (color_sums.count(point3D.first)) {
1435  Eigen::Vector3d color =
1436  color_sums[point3D.first] / color_counts[point3D.first];
1437  for (Eigen::Index i = 0; i < color.size(); ++i) {
1438  color[i] = std::round(color[i]);
1439  }
1440  point3D.second.SetColor(color.cast<uint8_t>());
1441  } else {
1442  point3D.second.SetColor(kBlackColor);
1443  }
1444  }
1445 }
1446 
1447 void Reconstruction::CreateImageDirs(const std::string& path) const {
1448  std::unordered_set<std::string> image_dirs;
1449  for (const auto& image : images_) {
1450  const std::vector<std::string> name_split =
1451  StringSplit(image.second.Name(), "/");
1452  if (name_split.size() > 1) {
1453  std::string dir = path;
1454  for (size_t i = 0; i < name_split.size() - 1; ++i) {
1455  dir = JoinPaths(dir, name_split[i]);
1456  image_dirs.insert(dir);
1457  }
1458  }
1459  }
1460  for (const auto& dir : image_dirs) {
1461  CreateDirIfNotExists(dir);
1462  }
1463 }
1464 
// Deletes each given 3D point whose track contains no image pair with a
// triangulation angle of at least `min_tri_angle` (in degrees). Returns
// the number of deleted points. Ids of non-existing points are ignored.
1465 size_t Reconstruction::FilterPoints3DWithSmallTriangulationAngle(
1466  const double min_tri_angle,
1467  const std::unordered_set<point3D_t>& point3D_ids) {
1468  // Number of filtered points.
1469  size_t num_filtered = 0;
1470 
1471  // Minimum triangulation angle in radians.
1472  const double min_tri_angle_rad = DegToRad(min_tri_angle);
1473 
1474  // Cache for image projection centers.
1475  std::unordered_map<image_t, Eigen::Vector3d> proj_centers;
1476 
1477  for (const auto point3D_id : point3D_ids) {
1478  if (!ExistsPoint3D(point3D_id)) {
1479  continue;
1480  }
1481 
1482  const class Point3D& point3D = Point3D(point3D_id);
1483 
1484  // Calculate triangulation angle for all pairwise combinations of image
1485  // poses in the track. Only delete point if none of the combinations
1486  // has a sufficient triangulation angle.
1487  bool keep_point = false;
1488  for (size_t i1 = 0; i1 < point3D.Track().Length(); ++i1) {
1489  const image_t image_id1 = point3D.Track().Element(i1).image_id;
1490 
// Lazily populate the projection-center cache on first use of an image.
1491  Eigen::Vector3d proj_center1;
1492  if (proj_centers.count(image_id1) == 0) {
1493  const class Image& image1 = Image(image_id1);
1494  proj_center1 = image1.ProjectionCenter();
1495  proj_centers.emplace(image_id1, proj_center1);
1496  } else {
1497  proj_center1 = proj_centers.at(image_id1);
1498  }
1499 
1500  for (size_t i2 = 0; i2 < i1; ++i2) {
1501  const image_t image_id2 = point3D.Track().Element(i2).image_id;
// Safe: since i2 < i1, image_id2 was cached in an earlier i1 iteration.
1502  const Eigen::Vector3d proj_center2 = proj_centers.at(image_id2);
1503 
1504  const double tri_angle = CalculateTriangulationAngle(
1505  proj_center1, proj_center2, point3D.XYZ());
1506 
1507  if (tri_angle >= min_tri_angle_rad) {
1508  keep_point = true;
1509  break;
1510  }
1511  }
1512 
1513  if (keep_point) {
1514  break;
1515  }
1516  }
1517 
1518  if (!keep_point) {
1519  num_filtered += 1;
1520  DeletePoint3D(point3D_id);
1521  }
1522  }
1523 
1524  return num_filtered;
1525 }
1526 
1527 size_t Reconstruction::FilterPoints3DWithLargeReprojectionError(
1528  const double max_reproj_error,
1529  const std::unordered_set<point3D_t>& point3D_ids) {
1530  const double max_squared_reproj_error = max_reproj_error * max_reproj_error;
1531 
1532  // Number of filtered points.
1533  size_t num_filtered = 0;
1534 
1535  for (const auto point3D_id : point3D_ids) {
1536  if (!ExistsPoint3D(point3D_id)) {
1537  continue;
1538  }
1539 
1540  class Point3D& point3D = Point3D(point3D_id);
1541 
1542  if (point3D.Track().Length() < 2) {
1543  DeletePoint3D(point3D_id);
1544  num_filtered += point3D.Track().Length();
1545  continue;
1546  }
1547 
1548  double reproj_error_sum = 0.0;
1549 
1550  std::vector<TrackElement> track_els_to_delete;
1551 
1552  for (const auto& track_el : point3D.Track().Elements()) {
1553  const class Image& image = Image(track_el.image_id);
1554  const class Camera& camera = Camera(image.CameraId());
1555  const Point2D& point2D = image.Point2D(track_el.point2D_idx);
1556  const double squared_reproj_error =
1558  point2D.XY(), point3D.XYZ(), image.Qvec(),
1559  image.Tvec(), camera);
1560  if (squared_reproj_error > max_squared_reproj_error) {
1561  track_els_to_delete.push_back(track_el);
1562  } else {
1563  reproj_error_sum += std::sqrt(squared_reproj_error);
1564  }
1565  }
1566 
1567  if (track_els_to_delete.size() >= point3D.Track().Length() - 1) {
1568  num_filtered += point3D.Track().Length();
1569  DeletePoint3D(point3D_id);
1570  } else {
1571  num_filtered += track_els_to_delete.size();
1572  for (const auto& track_el : track_els_to_delete) {
1573  DeleteObservation(track_el.image_id, track_el.point2D_idx);
1574  }
1575  point3D.SetError(reproj_error_sum / point3D.Track().Length());
1576  }
1577  }
1578 
1579  return num_filtered;
1580 }
1581 
1582 void Reconstruction::ReadCamerasText(const std::string& path) {
1583  cameras_.clear();
1584 
1585  std::ifstream file(path);
1586  CHECK(file.is_open()) << path;
1587 
1588  std::string line;
1589  std::string item;
1590 
1591  while (std::getline(file, line)) {
1592  StringTrim(&line);
1593 
1594  if (line.empty() || line[0] == '#') {
1595  continue;
1596  }
1597 
1598  std::stringstream line_stream(line);
1599 
1600  class Camera camera;
1601 
1602  // ID
1603  std::getline(line_stream, item, ' ');
1604  camera.SetCameraId(std::stoul(item));
1605 
1606  // MODEL
1607  std::getline(line_stream, item, ' ');
1608  camera.SetModelIdFromName(item);
1609 
1610  // WIDTH
1611  std::getline(line_stream, item, ' ');
1612  camera.SetWidth(std::stoll(item));
1613 
1614  // HEIGHT
1615  std::getline(line_stream, item, ' ');
1616  camera.SetHeight(std::stoll(item));
1617 
1618  // PARAMS
1619  camera.Params().clear();
1620  while (!line_stream.eof()) {
1621  std::getline(line_stream, item, ' ');
1622  camera.Params().push_back(std::stold(item));
1623  }
1624 
1625  CHECK(camera.VerifyParams());
1626 
1627  cameras_.emplace(camera.CameraId(), std::move(camera));
1628  }
1629 }
1630 
// Parses the text `images.txt` format: two lines per image — the first
// with IMAGE_ID QW QX QY QZ TX TY TZ CAMERA_ID NAME, the second with
// (X, Y, POINT3D_ID) triplets. Replaces the current images and marks
// every image read as registered (the text format only stores registered
// images).
1631 void Reconstruction::ReadImagesText(const std::string& path) {
1632  images_.clear();
1633 
1634  std::ifstream file(path);
1635  CHECK(file.is_open()) << path;
1636 
1637  std::string line;
1638  std::string item;
1639 
1640  while (std::getline(file, line)) {
1641  StringTrim(&line);
1642 
1643  if (line.empty() || line[0] == '#') {
1644  continue;
1645  }
1646 
1647  std::stringstream line_stream1(line);
1648 
1649  // ID
1650  std::getline(line_stream1, item, ' ');
1651  const image_t image_id = std::stoul(item);
1652 
1653  class Image image;
1654  image.SetImageId(image_id);
1655 
1656  image.SetRegistered(true);
1657  reg_image_ids_.push_back(image_id);
1658 
1659  // QVEC (qw, qx, qy, qz)
1660  std::getline(line_stream1, item, ' ');
1661  image.Qvec(0) = std::stold(item);
1662 
1663  std::getline(line_stream1, item, ' ');
1664  image.Qvec(1) = std::stold(item);
1665 
1666  std::getline(line_stream1, item, ' ');
1667  image.Qvec(2) = std::stold(item);
1668 
1669  std::getline(line_stream1, item, ' ');
1670  image.Qvec(3) = std::stold(item);
1671 
1672  image.NormalizeQvec();
1673 
1674  // TVEC
1675  std::getline(line_stream1, item, ' ');
1676  image.Tvec(0) = std::stold(item);
1677 
1678  std::getline(line_stream1, item, ' ');
1679  image.Tvec(1) = std::stold(item);
1680 
1681  std::getline(line_stream1, item, ' ');
1682  image.Tvec(2) = std::stold(item);
1683 
1684  // CAMERA_ID
1685  std::getline(line_stream1, item, ' ');
1686  image.SetCameraId(std::stoul(item));
1687 
1688  // NAME
1689  std::getline(line_stream1, item, ' ');
1690  image.SetName(item);
1691 
// The second line of the pair holds the image's 2D points; a truncated
// file (header line without a points line) terminates the loop.
1692  // POINTS2D
1693  if (!std::getline(file, line)) {
1694  break;
1695  }
1696 
1697  StringTrim(&line);
1698  std::stringstream line_stream2(line);
1699 
1700  std::vector<Eigen::Vector2d> points2D;
1701  std::vector<point3D_t> point3D_ids;
1702 
// An empty points line means the image has no 2D observations.
1703  if (!line.empty()) {
1704  while (!line_stream2.eof()) {
1705  Eigen::Vector2d point;
1706 
1707  std::getline(line_stream2, item, ' ');
1708  point.x() = std::stold(item);
1709 
1710  std::getline(line_stream2, item, ' ');
1711  point.y() = std::stold(item);
1712 
1713  points2D.push_back(point);
1714 
// POINT3D_ID of -1 marks a 2D point without a triangulated 3D point.
1715  std::getline(line_stream2, item, ' ');
1716  if (item == "-1") {
1717  point3D_ids.push_back(kInvalidPoint3DId);
1718  } else {
1719  point3D_ids.push_back(std::stoll(item));
1720  }
1721  }
1722  }
1723 
1724  image.SetUp(Camera(image.CameraId()));
1725  image.SetPoints2D(points2D);
1726 
// Link each 2D observation to its 3D point after the points are set.
1727  for (point2D_t point2D_idx = 0; point2D_idx < image.NumPoints2D();
1728  ++point2D_idx) {
1729  if (point3D_ids[point2D_idx] != kInvalidPoint3DId) {
1730  image.SetPoint3DForPoint2D(point2D_idx,
1731  point3D_ids[point2D_idx]);
1732  }
1733  }
1734 
1735  images_.emplace(image.ImageId(), std::move(image));
1736  }
1737 }
1738 
1739 void Reconstruction::ReadPoints3DText(const std::string& path) {
1740  points3D_.clear();
1741 
1742  std::ifstream file(path);
1743  CHECK(file.is_open()) << path;
1744 
1745  std::string line;
1746  std::string item;
1747 
1748  while (std::getline(file, line)) {
1749  StringTrim(&line);
1750 
1751  if (line.empty() || line[0] == '#') {
1752  continue;
1753  }
1754 
1755  std::stringstream line_stream(line);
1756 
1757  // ID
1758  std::getline(line_stream, item, ' ');
1759  const point3D_t point3D_id = std::stoll(item);
1760 
1761  // Make sure, that we can add new 3D points after reading 3D points
1762  // without overwriting existing 3D points.
1763  num_added_points3D_ = std::max(num_added_points3D_, point3D_id);
1764 
1765  class Point3D point3D;
1766 
1767  // XYZ
1768  std::getline(line_stream, item, ' ');
1769  point3D.XYZ(0) = std::stold(item);
1770 
1771  std::getline(line_stream, item, ' ');
1772  point3D.XYZ(1) = std::stold(item);
1773 
1774  std::getline(line_stream, item, ' ');
1775  point3D.XYZ(2) = std::stold(item);
1776 
1777  // Color
1778  std::getline(line_stream, item, ' ');
1779  point3D.Color(0) = static_cast<uint8_t>(std::stoi(item));
1780 
1781  std::getline(line_stream, item, ' ');
1782  point3D.Color(1) = static_cast<uint8_t>(std::stoi(item));
1783 
1784  std::getline(line_stream, item, ' ');
1785  point3D.Color(2) = static_cast<uint8_t>(std::stoi(item));
1786 
1787  // ERROR
1788  std::getline(line_stream, item, ' ');
1789  point3D.SetError(std::stold(item));
1790 
1791  // TRACK
1792  while (!line_stream.eof()) {
1793  TrackElement track_el;
1794 
1795  std::getline(line_stream, item, ' ');
1796  StringTrim(&item);
1797  if (item.empty()) {
1798  break;
1799  }
1800  track_el.image_id = std::stoul(item);
1801 
1802  std::getline(line_stream, item, ' ');
1803  track_el.point2D_idx = std::stoul(item);
1804 
1805  point3D.Track().AddElement(track_el);
1806  }
1807 
1808  point3D.Track().Compress();
1809 
1810  points3D_.emplace(point3D_id, std::move(point3D));
1811  }
1812 }
1813 
// Reads the binary `cameras.bin` format: a little-endian uint64 camera
// count, then per camera the id, model id, width, height and the
// model-specific parameter array. Note: unlike ReadCamerasText, this does
// not clear `cameras_` first — entries are added to the existing map.
1814 void Reconstruction::ReadCamerasBinary(const std::string& path) {
1815  std::ifstream file(path, std::ios::binary);
1816  CHECK(file.is_open()) << path;
1817 
1818  const size_t num_cameras = ReadBinaryLittleEndian<uint64_t>(&file);
1819  for (size_t i = 0; i < num_cameras; ++i) {
1820  class Camera camera;
1821  camera.SetCameraId(ReadBinaryLittleEndian<camera_t>(&file));
1822  camera.SetModelId(ReadBinaryLittleEndian<int>(&file));
1823  camera.SetWidth(ReadBinaryLittleEndian<uint64_t>(&file));
1824  camera.SetHeight(ReadBinaryLittleEndian<uint64_t>(&file));
// The two-argument overload fills the params vector for the set model.
1825  ReadBinaryLittleEndian<double>(&file, &camera.Params());
1826  CHECK(camera.VerifyParams());
1827  cameras_.emplace(camera.CameraId(), std::move(camera));
1828  }
1829 }
1830 
// Reads the binary `images.bin` format: a little-endian uint64 image
// count, then per image the id, quaternion, translation, camera id, a
// NUL-terminated name, and the 2D points with their 3D point ids. Every
// image read is marked registered. Note: `images_` is not cleared first.
1831 void Reconstruction::ReadImagesBinary(const std::string& path) {
1832  std::ifstream file(path, std::ios::binary);
1833  CHECK(file.is_open()) << path;
1834 
1835  const size_t num_reg_images = ReadBinaryLittleEndian<uint64_t>(&file);
1836  for (size_t i = 0; i < num_reg_images; ++i) {
1837  class Image image;
1838 
1839  image.SetImageId(ReadBinaryLittleEndian<image_t>(&file));
1840 
1841  image.Qvec(0) = ReadBinaryLittleEndian<double>(&file);
1842  image.Qvec(1) = ReadBinaryLittleEndian<double>(&file);
1843  image.Qvec(2) = ReadBinaryLittleEndian<double>(&file);
1844  image.Qvec(3) = ReadBinaryLittleEndian<double>(&file);
1845  image.NormalizeQvec();
1846 
1847  image.Tvec(0) = ReadBinaryLittleEndian<double>(&file);
1848  image.Tvec(1) = ReadBinaryLittleEndian<double>(&file);
1849  image.Tvec(2) = ReadBinaryLittleEndian<double>(&file);
1850 
1851  image.SetCameraId(ReadBinaryLittleEndian<camera_t>(&file));
1852 
// The image name is stored as a NUL-terminated byte string.
1853  char name_char;
1854  do {
1855  file.read(&name_char, 1);
1856  if (name_char != '\0') {
1857  image.Name() += name_char;
1858  }
1859  } while (name_char != '\0');
1860 
1861  const size_t num_points2D = ReadBinaryLittleEndian<uint64_t>(&file);
1862 
1863  std::vector<Eigen::Vector2d> points2D;
1864  points2D.reserve(num_points2D);
1865  std::vector<point3D_t> point3D_ids;
1866  point3D_ids.reserve(num_points2D);
1867  for (size_t j = 0; j < num_points2D; ++j) {
1868  const double x = ReadBinaryLittleEndian<double>(&file);
1869  const double y = ReadBinaryLittleEndian<double>(&file);
1870  points2D.emplace_back(x, y);
1871  point3D_ids.push_back(ReadBinaryLittleEndian<point3D_t>(&file));
1872  }
1873 
1874  image.SetUp(Camera(image.CameraId()));
1875  image.SetPoints2D(points2D);
1876 
// Link each 2D observation to its 3D point after the points are set.
1877  for (point2D_t point2D_idx = 0; point2D_idx < image.NumPoints2D();
1878  ++point2D_idx) {
1879  if (point3D_ids[point2D_idx] != kInvalidPoint3DId) {
1880  image.SetPoint3DForPoint2D(point2D_idx,
1881  point3D_ids[point2D_idx]);
1882  }
1883  }
1884 
1885  image.SetRegistered(true);
1886  reg_image_ids_.push_back(image.ImageId());
1887 
1888  images_.emplace(image.ImageId(), std::move(image));
1889  }
1890 }
1891 
// Reads the binary `points3D.bin` format: a little-endian uint64 point
// count, then per point the id, XYZ, RGB, reprojection error, and the
// track as (image_id, point2D_idx) pairs. Note: `points3D_` is not
// cleared first.
1892 void Reconstruction::ReadPoints3DBinary(const std::string& path) {
1893  std::ifstream file(path, std::ios::binary);
1894  CHECK(file.is_open()) << path;
1895 
1896  const size_t num_points3D = ReadBinaryLittleEndian<uint64_t>(&file);
1897  for (size_t i = 0; i < num_points3D; ++i) {
1898  class Point3D point3D;
1899 
1900  const point3D_t point3D_id = ReadBinaryLittleEndian<point3D_t>(&file);
// Track the largest id so later AddPoint3D calls do not collide.
1901  num_added_points3D_ = std::max(num_added_points3D_, point3D_id);
1902 
1903  point3D.XYZ()(0) = ReadBinaryLittleEndian<double>(&file);
1904  point3D.XYZ()(1) = ReadBinaryLittleEndian<double>(&file);
1905  point3D.XYZ()(2) = ReadBinaryLittleEndian<double>(&file);
1906  point3D.Color(0) = ReadBinaryLittleEndian<uint8_t>(&file);
1907  point3D.Color(1) = ReadBinaryLittleEndian<uint8_t>(&file);
1908  point3D.Color(2) = ReadBinaryLittleEndian<uint8_t>(&file);
1909  point3D.SetError(ReadBinaryLittleEndian<double>(&file));
1910 
1911  const size_t track_length = ReadBinaryLittleEndian<uint64_t>(&file);
1912  for (size_t j = 0; j < track_length; ++j) {
1913  const image_t image_id = ReadBinaryLittleEndian<image_t>(&file);
1914  const point2D_t point2D_idx =
1915  ReadBinaryLittleEndian<point2D_t>(&file);
1916  point3D.Track().AddElement(image_id, point2D_idx);
1917  }
1918  point3D.Track().Compress();
1919 
1920  points3D_.emplace(point3D_id, std::move(point3D));
1921  }
1922 }
1923 
1924 void Reconstruction::WriteCamerasText(const std::string& path) const {
1925  std::ofstream file(path, std::ios::trunc);
1926  CHECK(file.is_open()) << path;
1927 
1928  // Ensure that we don't loose any precision by storing in text.
1929  file.precision(17);
1930 
1931  file << "# Camera list with one line of data per camera:" << std::endl;
1932  file << "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]" << std::endl;
1933  file << "# Number of cameras: " << cameras_.size() << std::endl;
1934 
1935  for (const auto& camera : cameras_) {
1936  std::ostringstream line;
1937 
1938  line << camera.first << " ";
1939  line << camera.second.ModelName() << " ";
1940  line << camera.second.Width() << " ";
1941  line << camera.second.Height() << " ";
1942 
1943  for (const double param : camera.second.Params()) {
1944  line << param << " ";
1945  }
1946 
1947  std::string line_string = line.str();
1948  line_string = line_string.substr(0, line_string.size() - 1);
1949 
1950  file << line_string << std::endl;
1951  }
1952 }
1953 
1954 void Reconstruction::WriteImagesText(const std::string& path) const {
1955  std::ofstream file(path, std::ios::trunc);
1956  CHECK(file.is_open()) << path;
1957 
1958  // Ensure that we don't loose any precision by storing in text.
1959  file.precision(17);
1960 
1961  file << "# Image list with two lines of data per image:" << std::endl;
1962  file << "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, "
1963  "NAME"
1964  << std::endl;
1965  file << "# POINTS2D[] as (X, Y, POINT3D_ID)" << std::endl;
1966  file << "# Number of images: " << reg_image_ids_.size()
1967  << ", mean observations per image: "
1969 
1970  for (const auto& image : images_) {
1971  if (!image.second.IsRegistered()) {
1972  continue;
1973  }
1974 
1975  std::ostringstream line;
1976  std::string line_string;
1977 
1978  line << image.first << " ";
1979 
1980  // QVEC (qw, qx, qy, qz)
1981  const Eigen::Vector4d normalized_qvec =
1982  NormalizeQuaternion(image.second.Qvec());
1983  line << normalized_qvec(0) << " ";
1984  line << normalized_qvec(1) << " ";
1985  line << normalized_qvec(2) << " ";
1986  line << normalized_qvec(3) << " ";
1987 
1988  // TVEC
1989  line << image.second.Tvec(0) << " ";
1990  line << image.second.Tvec(1) << " ";
1991  line << image.second.Tvec(2) << " ";
1992 
1993  line << image.second.CameraId() << " ";
1994 
1995  line << image.second.Name();
1996 
1997  file << line.str() << std::endl;
1998 
1999  line.str("");
2000  line.clear();
2001 
2002  for (const Point2D& point2D : image.second.Points2D()) {
2003  line << point2D.X() << " ";
2004  line << point2D.Y() << " ";
2005  if (point2D.HasPoint3D()) {
2006  line << point2D.Point3DId() << " ";
2007  } else {
2008  line << -1 << " ";
2009  }
2010  }
2011  line_string = line.str();
2012  line_string = line_string.substr(0, line_string.size() - 1);
2013  file << line_string << std::endl;
2014  }
2015 }
2016 
2017 void Reconstruction::WritePoints3DText(const std::string& path) const {
2018  std::ofstream file(path, std::ios::trunc);
2019  CHECK(file.is_open()) << path;
2020 
2021  // Ensure that we don't loose any precision by storing in text.
2022  file.precision(17);
2023 
2024  file << "# 3D point list with one line of data per point:" << std::endl;
2025  file << "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, "
2026  "TRACK[] as (IMAGE_ID, POINT2D_IDX)"
2027  << std::endl;
2028  file << "# Number of points: " << points3D_.size()
2029  << ", mean track length: " << ComputeMeanTrackLength() << std::endl;
2030 
2031  for (const auto& point3D : points3D_) {
2032  file << point3D.first << " ";
2033  file << point3D.second.XYZ()(0) << " ";
2034  file << point3D.second.XYZ()(1) << " ";
2035  file << point3D.second.XYZ()(2) << " ";
2036  file << static_cast<int>(point3D.second.Color(0)) << " ";
2037  file << static_cast<int>(point3D.second.Color(1)) << " ";
2038  file << static_cast<int>(point3D.second.Color(2)) << " ";
2039  file << point3D.second.Error() << " ";
2040 
2041  std::ostringstream line;
2042 
2043  for (const auto& track_el : point3D.second.Track().Elements()) {
2044  line << track_el.image_id << " ";
2045  line << track_el.point2D_idx << " ";
2046  }
2047 
2048  std::string line_string = line.str();
2049  line_string = line_string.substr(0, line_string.size() - 1);
2050 
2051  file << line_string << std::endl;
2052  }
2053 }
2054 
2055 void Reconstruction::WriteCamerasBinary(const std::string& path) const {
2056  std::ofstream file(path, std::ios::trunc | std::ios::binary);
2057  CHECK(file.is_open()) << path;
2058 
2059  WriteBinaryLittleEndian<uint64_t>(&file, cameras_.size());
2060 
2061  for (const auto& camera : cameras_) {
2062  WriteBinaryLittleEndian<camera_t>(&file, camera.first);
2063  WriteBinaryLittleEndian<int>(&file, camera.second.ModelId());
2064  WriteBinaryLittleEndian<uint64_t>(&file, camera.second.Width());
2065  WriteBinaryLittleEndian<uint64_t>(&file, camera.second.Height());
2066  for (const double param : camera.second.Params()) {
2067  WriteBinaryLittleEndian<double>(&file, param);
2068  }
2069  }
2070 }
2071 
2072 void Reconstruction::WriteImagesBinary(const std::string& path) const {
2073  std::ofstream file(path, std::ios::trunc | std::ios::binary);
2074  CHECK(file.is_open()) << path;
2075 
2076  WriteBinaryLittleEndian<uint64_t>(&file, reg_image_ids_.size());
2077 
2078  for (const auto& image : images_) {
2079  if (!image.second.IsRegistered()) {
2080  continue;
2081  }
2082 
2083  WriteBinaryLittleEndian<image_t>(&file, image.first);
2084 
2085  const Eigen::Vector4d normalized_qvec =
2086  NormalizeQuaternion(image.second.Qvec());
2087  WriteBinaryLittleEndian<double>(&file, normalized_qvec(0));
2088  WriteBinaryLittleEndian<double>(&file, normalized_qvec(1));
2089  WriteBinaryLittleEndian<double>(&file, normalized_qvec(2));
2090  WriteBinaryLittleEndian<double>(&file, normalized_qvec(3));
2091 
2092  WriteBinaryLittleEndian<double>(&file, image.second.Tvec(0));
2093  WriteBinaryLittleEndian<double>(&file, image.second.Tvec(1));
2094  WriteBinaryLittleEndian<double>(&file, image.second.Tvec(2));
2095 
2096  WriteBinaryLittleEndian<camera_t>(&file, image.second.CameraId());
2097 
2098  const std::string name = image.second.Name() + '\0';
2099  file.write(name.c_str(), name.size());
2100 
2101  WriteBinaryLittleEndian<uint64_t>(&file, image.second.NumPoints2D());
2102  for (const Point2D& point2D : image.second.Points2D()) {
2103  WriteBinaryLittleEndian<double>(&file, point2D.X());
2104  WriteBinaryLittleEndian<double>(&file, point2D.Y());
2105  WriteBinaryLittleEndian<point3D_t>(&file, point2D.Point3DId());
2106  }
2107  }
2108 }
2109 
2110 void Reconstruction::WritePoints3DBinary(const std::string& path) const {
2111  std::ofstream file(path, std::ios::trunc | std::ios::binary);
2112  CHECK(file.is_open()) << path;
2113 
2114  WriteBinaryLittleEndian<uint64_t>(&file, points3D_.size());
2115 
2116  for (const auto& point3D : points3D_) {
2117  WriteBinaryLittleEndian<point3D_t>(&file, point3D.first);
2118  WriteBinaryLittleEndian<double>(&file, point3D.second.XYZ()(0));
2119  WriteBinaryLittleEndian<double>(&file, point3D.second.XYZ()(1));
2120  WriteBinaryLittleEndian<double>(&file, point3D.second.XYZ()(2));
2121  WriteBinaryLittleEndian<uint8_t>(&file, point3D.second.Color(0));
2122  WriteBinaryLittleEndian<uint8_t>(&file, point3D.second.Color(1));
2123  WriteBinaryLittleEndian<uint8_t>(&file, point3D.second.Color(2));
2124  WriteBinaryLittleEndian<double>(&file, point3D.second.Error());
2125 
2126  WriteBinaryLittleEndian<uint64_t>(&file,
2127  point3D.second.Track().Length());
2128  for (const auto& track_el : point3D.second.Track().Elements()) {
2129  WriteBinaryLittleEndian<image_t>(&file, track_el.image_id);
2130  WriteBinaryLittleEndian<point2D_t>(&file, track_el.point2D_idx);
2131  }
2132  }
2133 }
2134 
2135 void Reconstruction::SetObservationAsTriangulated(
2136  const image_t image_id,
2137  const point2D_t point2D_idx,
2138  const bool is_continued_point3D) {
2139  if (correspondence_graph_ == nullptr) {
2140  return;
2141  }
2142 
2143  const class Image& image = Image(image_id);
2144  const Point2D& point2D = image.Point2D(point2D_idx);
2145  const std::vector<CorrespondenceGraph::Correspondence>& corrs =
2146  correspondence_graph_->FindCorrespondences(image_id, point2D_idx);
2147 
2148  CHECK(image.IsRegistered());
2149  CHECK(point2D.HasPoint3D());
2150 
2151  for (const auto& corr : corrs) {
2152  class Image& corr_image = Image(corr.image_id);
2153  const Point2D& corr_point2D = corr_image.Point2D(corr.point2D_idx);
2154  corr_image.IncrementCorrespondenceHasPoint3D(corr.point2D_idx);
2155  // Update number of shared 3D points between image pairs and make sure
2156  // to only count the correspondences once (not twice forward and
2157  // backward).
2158  if (point2D.Point3DId() == corr_point2D.Point3DId() &&
2159  (is_continued_point3D || image_id < corr.image_id)) {
2160  const image_pair_t pair_id =
2161  Database::ImagePairToPairId(image_id, corr.image_id);
2162  image_pair_stats_[pair_id].num_tri_corrs += 1;
2163  CHECK_LE(image_pair_stats_[pair_id].num_tri_corrs,
2164  image_pair_stats_[pair_id].num_total_corrs)
2165  << "The correspondence graph graph must not contain "
2166  "duplicate "
2167  "matches";
2168  }
2169  }
2170 }
2171 
2172 void Reconstruction::ResetTriObservations(const image_t image_id,
2173  const point2D_t point2D_idx,
2174  const bool is_deleted_point3D) {
2175  if (correspondence_graph_ == nullptr) {
2176  return;
2177  }
2178 
2179  const class Image& image = Image(image_id);
2180  const Point2D& point2D = image.Point2D(point2D_idx);
2181  const std::vector<CorrespondenceGraph::Correspondence>& corrs =
2182  correspondence_graph_->FindCorrespondences(image_id, point2D_idx);
2183 
2184  CHECK(image.IsRegistered());
2185  CHECK(point2D.HasPoint3D());
2186 
2187  for (const auto& corr : corrs) {
2188  class Image& corr_image = Image(corr.image_id);
2189  const Point2D& corr_point2D = corr_image.Point2D(corr.point2D_idx);
2190  corr_image.DecrementCorrespondenceHasPoint3D(corr.point2D_idx);
2191  // Update number of shared 3D points between image pairs and make sure
2192  // to only count the correspondences once (not twice forward and
2193  // backward).
2194  if (point2D.Point3DId() == corr_point2D.Point3DId() &&
2195  (!is_deleted_point3D || image_id < corr.image_id)) {
2196  const image_pair_t pair_id =
2197  Database::ImagePairToPairId(image_id, corr.image_id);
2198  image_pair_stats_[pair_id].num_tri_corrs -= 1;
2199  CHECK_GE(image_pair_stats_[pair_id].num_tri_corrs, 0)
2200  << "The scene graph graph must not contain duplicate "
2201  "matches";
2202  }
2203  }
2204 }
2205 
2206 } // namespace colmap
std::shared_ptr< core::Tensor > image
std::string name
int points
math::float4 color
bool Read(const std::string &path, const bool as_rgb=true)
Definition: bitmap.cc:485
bool InterpolateBilinear(const double x, const double y, BitmapColor< float > *color) const
Definition: bitmap.cc:248
bool VerifyParams() const
Definition: camera.cc:182
double FocalLengthY() const
Definition: camera.cc:121
double MeanFocalLength() const
Definition: camera.cc:100
const std::vector< double > & Params() const
Definition: camera.h:176
camera_t CameraId() const
Definition: camera.h:154
int ModelId() const
Definition: camera.h:158
bool HasBogusParams(const double min_focal_length_ratio, const double max_focal_length_ratio, const double max_extra_param) const
Definition: camera.cc:186
void SetCameraId(const camera_t camera_id)
Definition: camera.h:156
double FocalLengthX() const
Definition: camera.cc:115
const std::vector< size_t > & FocalLengthIdxs() const
Definition: camera.cc:63
double PrincipalPointX() const
Definition: camera.cc:146
size_t Width() const
Definition: camera.h:160
double PrincipalPointY() const
Definition: camera.cc:152
size_t Height() const
Definition: camera.h:162
const std::vector< Correspondence > & FindCorrespondences(const image_t image_id, const point2D_t point2D_idx) const
point2D_t NumCorrespondencesBetweenImages(const image_t image_id1, const image_t image_id2) const
size_t NumImages() const
const std::unordered_map< camera_t, class Camera > & Cameras() const
size_t NumCameras() const
const std::unordered_map< image_t, class Image > & Images() const
const class CorrespondenceGraph & CorrespondenceGraph() const
static image_pair_t ImagePairToPairId(const image_t image_id1, const image_t image_id2)
Definition: database.h:339
bool ExistsImageWithName(std::string name) const
Definition: database.cc:303
Image ReadImageWithName(const std::string &name) const
Definition: database.cc:407
point2D_t NumPoints3D() const
Definition: image.h:265
void SetName(const std::string &name)
Definition: image.h:246
void SetPoints2D(const std::vector< Eigen::Vector2d > &points)
Definition: image.cc:71
void SetRegistered(const bool registered)
Definition: image.h:259
void ResetPoint3DForPoint2D(const point2D_t point2D_idx)
Definition: image.cc:97
void SetTvecPrior(const Eigen::Vector3d &tvec)
Definition: image.h:347
void SetImageId(const image_t image_id)
Definition: image.h:240
void SetCameraId(const camera_t camera_id)
Definition: image.h:250
const class Point2D & Point2D(const point2D_t point2D_idx) const
Definition: image.h:349
void SetQvec(const Eigen::Vector4d &qvec)
Definition: image.h:309
const std::string & Name() const
Definition: image.h:242
void SetNumObservations(const point2D_t num_observations)
Definition: image.h:269
void SetQvecPrior(const Eigen::Vector4d &qvec)
Definition: image.h:323
Eigen::Vector3d ProjectionCenter() const
Definition: image.cc:152
point2D_t NumPoints2D() const
Definition: image.h:261
void SetTvec(const Eigen::Vector3d &tvec)
Definition: image.h:333
void SetNumCorrespondences(const point2D_t num_observations)
Definition: image.h:275
point3D_t Point3DId() const
Definition: point2d.h:63
double X() const
Definition: point2d.h:57
double Y() const
Definition: point2d.h:59
bool HasPoint3D() const
Definition: point2d.h:65
void SetTrack(const class Track &track)
Definition: point3d.h:110
const Eigen::Vector3d & XYZ() const
Definition: point3d.h:74
const class Track & Track() const
Definition: point3d.h:106
void SetColor(const Eigen::Vector3ub &color)
Definition: point3d.h:98
void SetXYZ(const Eigen::Vector3d &xyz)
Definition: point3d.h:88
const Eigen::Vector3ub & Color() const
Definition: point3d.h:90
void ReadBinary(const std::string &path)
size_t NumImages() const
void ExtractColorsForAllImages(const std::string &path)
bool ExportCam(const std::string &path, bool skip_distortion=false) const
size_t FilterAllPoints3D(const double max_reproj_error, const double min_tri_angle)
Reconstruction Crop(const std::pair< Eigen::Vector3d, Eigen::Vector3d > &bbox) const
double ComputeMeanTrackLength() const
point3D_t MergePoints3D(const point3D_t point3D_id1, const point3D_t point3D_id2)
size_t NumRegImages() const
Reconstruction & operator=(const Reconstruction &other)
void Transform(const SimilarityTransform3 &tform)
std::pair< Eigen::Vector3d, Eigen::Vector3d > ComputeBoundingBox(const double p0=0.0, const double p1=1.0) const
size_t FilterObservationsWithNegativeDepth()
void Write(const std::string &path) const
std::vector< image_t > FindCommonRegImageIds(const Reconstruction &reconstruction) const
void Normalize(const double extent=10.0, const double p0=0.1, const double p1=0.9, const bool use_images=true)
bool IsImageRegistered(const image_t image_id) const
bool ExportNVM(const std::string &path, bool skip_distortion=false) const
void AddImage(const class Image &image)
bool ExtractColorsForImage(const image_t image_id, const std::string &path)
const std::unordered_map< image_t, class Image > & Images() const
void AddCamera(const class Camera &camera)
std::vector< image_t > FilterImages(const double min_focal_length_ratio, const double max_focal_length_ratio, const double max_extra_param)
void DeleteObservation(const image_t image_id, const point2D_t point2D_idx)
size_t ComputeNumObservations() const
Eigen::Vector3d ComputeCentroid(const double p0=0.1, const double p1=0.9) const
const class Image & Image(const image_t image_id) const
bool ExportRecon3D(const std::string &path, bool skip_distortion=false) const
void ExportPLY(const std::string &path) const
std::unordered_set< point3D_t > Point3DIds() const
const class Camera & Camera(const camera_t camera_id) const
void WriteBinary(const std::string &path) const
std::vector< PlyPoint > ConvertToPLY() const
bool Merge(const Reconstruction &reconstruction, const double max_reproj_error)
void ExportVRML(const std::string &images_path, const std::string &points3D_path, const double image_scale, const Eigen::Vector3d &image_rgb) const
bool ExportBundler(const std::string &path, const std::string &list_path, bool skip_distortion=false) const
void CreateImageDirs(const std::string &path) const
const std::unordered_map< camera_t, class Camera > & Cameras() const
void Load(const DatabaseCache &database_cache)
const std::unordered_map< point3D_t, class Point3D > & Points3D() const
bool ExistsCamera(const camera_t camera_id) const
void AddObservation(const point3D_t point3D_id, const TrackElement &track_el)
const class Image * FindImageWithName(const std::string &name) const
const std::unordered_map< image_pair_t, ImagePairStat > & ImagePairs() const
void DeRegisterImage(const image_t image_id)
void ReadText(const std::string &path)
const std::vector< image_t > & RegImageIds() const
size_t FilterPoints3D(const double max_reproj_error, const double min_tri_angle, const std::unordered_set< point3D_t > &point3D_ids)
void DeletePoint3D(const point3D_t point3D_id)
double ComputeMeanReprojectionError() const
bool ExistsImage(const image_t image_id) const
double ComputeMeanObservationsPerRegImage() const
void RegisterImage(const image_t image_id)
void WriteText(const std::string &path) const
size_t FilterPoints3DInImages(const double max_reproj_error, const double min_tri_angle, const std::unordered_set< image_t > &image_ids)
void ImportPLY(const std::string &path)
void Read(const std::string &path)
const class Point3D & Point3D(const point3D_t point3D_id) const
point3D_t AddPoint3D(const Eigen::Vector3d &xyz, const Track &track, const Eigen::Vector3ub &color=Eigen::Vector3ub::Zero())
void SetUp(const CorrespondenceGraph *correspondence_graph)
size_t NumAddedPoints3D() const
bool ExistsPoint3D(const point3D_t point3D_id) const
const CorrespondenceGraph * GetCorrespondenceGraph() const
void TranscribeImageIdsToDatabase(const Database &database)
void TransformPose(Eigen::Vector4d *qvec, Eigen::Vector3d *tvec) const
void TransformPoint(Eigen::Vector3d *xyz) const
void AddElement(const TrackElement &element)
Definition: track.h:103
size_t Length() const
Definition: track.h:82
void AddElements(const std::vector< TrackElement > &elements)
Definition: track.h:111
const std::vector< TrackElement > & Elements() const
Definition: track.h:84
void DeleteElement(const size_t idx)
Definition: track.h:115
void Reserve(const size_t num_elements)
Definition: track.h:120
const double * e
normal_z y
normal_z x
Matrix< double, 3, 4 > Matrix3x4d
Definition: types.h:39
Matrix< uint8_t, 3, 1 > Vector3ub
Definition: types.h:42
QTextStream & endl(QTextStream &stream)
Definition: QtCompat.h:718
CLOUDVIEWER_HOST_DEVICE Pair< First, Second > make_pair(const First &_first, const Second &_second)
Definition: SlabTraits.h:49
static const std::string path
Definition: PointCloud.cpp:59
uint64_t image_pair_t
Definition: types.h:64
bool ComputeAlignmentBetweenReconstructions(const Reconstruction &src_reconstruction, const Reconstruction &ref_reconstruction, const double min_inlier_observations, const double max_reproj_error, Eigen::Matrix3x4d *alignment)
double CalculateSquaredReprojectionError(const Eigen::Vector2d &point2D, const Eigen::Vector3d &point3D, const Eigen::Vector4d &qvec, const Eigen::Vector3d &tvec, const Camera &camera)
Definition: projection.cc:111
bool HasPointPositiveDepth(const Eigen::Matrix3x4d &proj_matrix, const Eigen::Vector3d &point3D)
Definition: projection.cc:191
uint64_t point3D_t
Definition: types.h:72
Eigen::Vector4d ComposeIdentityQuaternion()
Definition: pose.h:209
void StringTrim(std::string *str)
Definition: string.cc:188
Eigen::Vector4d NormalizeQuaternion(const Eigen::Vector4d &qvec)
Definition: pose.cc:82
uint32_t point2D_t
Definition: types.h:67
std::string StringReplace(const std::string &str, const std::string &old_str, const std::string &new_str)
Definition: string.cc:140
void WriteBinaryPlyPoints(const std::string &path, const std::vector< PlyPoint > &points, const bool write_normal, const bool write_rgb)
Definition: ply.cc:369
void SplitFileExtension(const std::string &path, std::string *root, std::string *ext)
Definition: misc.cc:64
bool ExistsFile(const std::string &path)
Definition: misc.cc:100
void CreateDirIfNotExists(const std::string &path)
Definition: misc.cc:112
std::string JoinPaths(T const &... paths)
Definition: misc.h:128
std::string EnsureTrailingSlash(const std::string &str)
Definition: misc.cc:40
const point3D_t kInvalidPoint3DId
Definition: types.h:80
uint32_t image_t
Definition: types.h:61
std::vector< std::string > StringSplit(const std::string &str, const std::string &delim)
Definition: string.cc:166
std::string StringPrintf(const char *format,...)
Definition: string.cc:131
double CalculateTriangulationAngle(const Eigen::Vector3d &proj_center1, const Eigen::Vector3d &proj_center2, const Eigen::Vector3d &point3D)
float DegToRad(const float deg)
Definition: math.h:171
std::vector< PlyPoint > ReadPly(const std::string &path)
Definition: ply.cc:43
Eigen::Matrix3d QuaternionToRotationMatrix(const Eigen::Vector4d &qvec)
Definition: pose.cc:75
Eigen::MatrixXd::Index Index
Definition: knncpp.h:26
static const int model_id
uint8_t b
Definition: ply.h:26
float z
Definition: ply.h:20
uint8_t r
Definition: ply.h:24
uint8_t g
Definition: ply.h:25
float x
Definition: ply.h:18
float y
Definition: ply.h:19
static const int model_id
static const std::vector< size_t > extra_params_idxs
static const std::vector< size_t > extra_params_idxs
image_t image_id
Definition: track.h:22
point2D_t point2D_idx
Definition: track.h:24