ACloudViewer  3.9.4
A Modern Library for 3D Data Processing
texturing_controller.cc
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // - CloudViewer: www.cloudViewer.org -
3 // ----------------------------------------------------------------------------
4 // Copyright (c) 2018-2024 www.cloudViewer.org
5 // SPDX-License-Identifier: MIT
6 // ----------------------------------------------------------------------------
7 
9 
10 #include <cmath>
11 #include <fstream>
12 
13 #include "base/reconstruction.h"
14 #include "mvs/workspace.h"
15 #include "util/logging.h"
16 
17 // CV_CORE_LIB
18 #include <CVLog.h>
19 #include <CVTools.h>
20 #include <FileSystem.h>
21 
22 // CV_DB_LIB
24 
25 // CV_IO_LIB
26 #include <AutoIO.h>
27 #include <ImageIO.h>
28 #include <ecvHObjectCaster.h>
29 #include <ecvMesh.h>
30 #include <ecvPointCloud.h>
31 
32 // Local
33 #include "mvs/texturing.h"
34 
35 namespace colmap {
36 
// TexturingReconstruction constructor (member-initializer list).
// Stores references/copies of the options, reconstruction, I/O paths and the
// optional explicit image-id subset, then allocates an empty camera
// trajectory that Run()/Texturing() resize and fill later.
// NOTE(review): the signature's opening line (inner line 37) is missing from
// this extraction; the parameter list below matches the declaration
// TexturingReconstruction(options, reconstruction, image_path, output_path,
// image_ids = {}).
38  const TexturingOptions& options,
39  const Reconstruction& reconstruction,
40  const std::string& image_path,
41  const std::string& output_path,
42  const std::vector<image_t>& image_ids)
43  : options_(options),
44  image_path_(image_path),
45  output_path_(output_path),
46  image_ids_(image_ids),
47  reconstruction_(reconstruction) {
// Start with an empty trajectory; Run() resizes it to the number of images.
48  camera_trajectory_ = std::make_shared<cloudViewer::camera::PinholeCameraTrajectory>();
49 }
50 
// Entry point of the texturing controller. Pipeline:
//   1. print options (verbose) and create the COLMAP-style output layout;
//   2. optionally open an MVS workspace for cached depth/normal maps;
//   3. build per-image camera parameters in parallel via Texturing();
//   4. validate every trajectory slot's intrinsics;
//   5. load the mesh (computing normals if absent);
//   6. filter cameras by depth/normal-map visibility;
//   7. run mvs::MvsTexturing and save the textured mesh.
// NOTE(review): this extraction is missing inner lines 78 (start of the
// statement deriving `parent_path` from options_.textured_file_path),
// 148/150 (the read-options declaration and the AutoReadMesh(...) call whose
// trailing arguments appear below) and 242; surrounding code is byte-exact.
51 void TexturingReconstruction::Run() {
52  PrintHeading1("Mesh Texturing");
53 
54  if (options_.verbose) {
55  options_.Print();
56  }
57 
// Workspace directory layout expected by the COLMAP MVS workspace reader.
58  CreateDirIfNotExists(JoinPaths(output_path_, "images"));
59  CreateDirIfNotExists(JoinPaths(output_path_, "sparse"));
60  CreateDirIfNotExists(JoinPaths(output_path_, "stereo"));
61  CreateDirIfNotExists(JoinPaths(output_path_, "stereo/depth_maps"));
62 
63  // Create workspace for accessing depth maps and normal maps
64  if (options_.use_depth_normal_maps) {
65  colmap::mvs::Workspace::Options workspace_options;
66  workspace_options.workspace_path = output_path_;
67  workspace_options.workspace_format = "COLMAP";
68  workspace_options.input_type = options_.depth_map_type;
69  workspace_options.stereo_folder = "stereo";
70  workspace_options.max_image_size = -1;
71  workspace_options.image_as_rgb = true;
72  workspace_options.cache_size = 32.0;
73  workspace_options.num_threads = -1;
74 
75  workspace_ = std::make_unique<colmap::mvs::CachedWorkspace>(
76  workspace_options);
77  }
// NOTE(review): inner line 78 (the opening of the `parent_path` statement)
// is missing here; only its trailing argument and the uses below survive.
79  options_.textured_file_path);
80  CreateDirIfNotExists(parent_path);
81  CreateDirIfNotExists(JoinPaths(parent_path, "images"));
82 
// Texture each registered image (or the explicit image_ids_ subset) on a
// thread pool; each task fills exactly one slot of camera_trajectory_.
83  ThreadPool thread_pool;
84  std::vector<std::future<bool>> futures;
85  futures.reserve(reconstruction_.NumRegImages());
86  camera_trajectory_->parameters_.clear();
87  if (image_ids_.empty()) {
88  camera_trajectory_->parameters_.resize(reconstruction_.NumRegImages());
89  for (size_t i = 0; i < reconstruction_.NumRegImages(); ++i) {
90  const image_t image_id = reconstruction_.RegImageIds().at(i);
91  futures.push_back(thread_pool.AddTask(
92  &TexturingReconstruction::Texturing, this, image_id, i));
93  }
94  } else {
95  std::size_t index = 0;
96  camera_trajectory_->parameters_.resize(image_ids_.size());
97  for (const image_t image_id : image_ids_) {
98  futures.push_back(
99  thread_pool.AddTask(&TexturingReconstruction::Texturing,
100  this, image_id, index));
101  index += 1;
102  }
103  }
104 
105  // Only use the image names for the successfully textured mesh
106  // when writing the MVS config files
107  image_names_.clear();
108  for (size_t i = 0; i < futures.size(); ++i) {
109  if (IsStopped()) {
110  break;
111  }
112 
113  if (options_.verbose) {
114  std::cout << StringPrintf("texture image [%d/%d]\n", i + 1, futures.size());
115  }
116 
117  if (futures[i].get()) {
118  if (image_ids_.empty()) {
119  const image_t image_id = reconstruction_.RegImageIds().at(i);
120  image_names_.push_back(reconstruction_.Image(image_id).Name());
121  } else {
122  image_names_.push_back(
123  reconstruction_.Image(image_ids_[i]).Name());
124  }
125  }
126  }
127 
128  // check camera trajectory validation
// Abort if any slot was left with invalid intrinsics (e.g. a Texturing()
// task that failed on an unsupported camera model leaves its slot default).
129  for (size_t i = 0; i < camera_trajectory_->parameters_.size(); ++i) {
130  auto& cameraParams = camera_trajectory_->parameters_[i];
131  if (!cameraParams.intrinsic_.IsValid()) {
132  std::cerr << "ERROR: " << StringPrintf(
133  "Invalid camera intrinsic parameters found and ignore "
134  "texturing!\n");
135  return;
136  }
137  }
138 
139  // Load mesh once and reuse for both filtering and texturing
140  ccMesh mesh;
141  bool mesh_loaded = false;
142  if (!options_.meshed_file_path.empty() &&
143  ExistsFile(options_.meshed_file_path)) {
144  if (!mesh.CreateInternalCloud()) {
145  std::cerr << "ERROR: " << StringPrintf("creating internal cloud failed!\n");
146  return;
147  }
// NOTE(review): inner line 148 (the read-options declaration) and 150 (the
// AutoReadMesh(options_.meshed_file_path, mesh, ...) call that this `if`
// body belongs to) are missing from this extraction.
149  mesh_options.print_progress = false;
151  mesh_options)) {
152  mesh_loaded = true;
153  if (!mesh.hasNormals()) {
154  mesh.computeNormals(true);
155  }
156  if (options_.verbose) {
157  std::cout << StringPrintf("Loaded mesh: %zu vertices, %zu triangles\n",
158  mesh.getAssociatedCloud()
159  ? mesh.getAssociatedCloud()->size()
160  : 0,
161  mesh.size());
162  }
163  } else {
164  if (options_.verbose) {
165  std::cerr << "WARNING: " << StringPrintf("Failed to load mesh from %s\n",
166  options_.meshed_file_path.c_str());
167  }
168  }
169  } else {
170  std::cerr << "ERROR: " << StringPrintf("Mesh file path is empty or mesh file does not exist: %s\n",
171  options_.meshed_file_path.c_str());
172  return;
173  }
174 
175  // Filter camera trajectory based on depth/normal maps if enabled
176  std::shared_ptr<cloudViewer::camera::PinholeCameraTrajectory> filtered_trajectory =
177  camera_trajectory_;
178  if (options_.use_depth_normal_maps && workspace_) {
179  filtered_trajectory = FilterCameraTrajectory(&mesh);
180  if (options_.verbose) {
181  std::cout << StringPrintf("Filtered camera trajectory: %zu -> %zu cameras\n",
182  camera_trajectory_->parameters_.size(),
183  filtered_trajectory->parameters_.size());
184  }
185 
186  // Additional safety check: ensure filtered trajectory is valid
187  if (filtered_trajectory->parameters_.empty()) {
188  std::cerr << "ERROR: " << StringPrintf(
189  "No valid cameras after filtering! Aborting texturing.\n");
190  return;
191  }
192 
193  // Validate all cameras in filtered trajectory
194  for (size_t i = 0; i < filtered_trajectory->parameters_.size(); ++i) {
195  const auto& params = filtered_trajectory->parameters_[i];
196  if (!params.intrinsic_.IsValid()) {
197  std::cerr << "ERROR: " << StringPrintf(
198  "Camera %zu in filtered trajectory has invalid "
199  "intrinsics!\n",
200  i);
201  return;
202  }
203  // Check extrinsic matrix
204  for (int row = 0; row < 4; ++row) {
205  for (int col = 0; col < 4; ++col) {
206  if (!std::isfinite(params.extrinsic_(row, col))) {
207  std::cerr << "ERROR: " << StringPrintf(
208  "Camera %zu in filtered trajectory has invalid "
209  "extrinsics!\n",
210  i);
211  return;
212  }
213  }
214  }
215  }
216  }
217 
218  // Use mvs-texturing approach
219  mvs::MvsTexturing::Options mvs_options;
220  mvs_options.verbose = options_.verbose;
221  mvs_options.max_depth_error = options_.max_depth_error;
222  mvs_options.max_viewing_angle_deg = 75.0f; // mvs-texturing uses 75 degrees
223  mvs_options.use_depth_normal_maps = options_.use_depth_normal_maps;
224  // Use area for now, can enable GMI later
225  mvs_options.use_gradient_magnitude = false;
226 
227  mvs::MvsTexturing texturing(mvs_options, reconstruction_, workspace_.get(),
228  image_path_);
229 
230  // Pass loaded mesh (must be loaded at this point)
231  if (!mesh_loaded) {
232  std::cerr << "ERROR: " << StringPrintf("Mesh must be loaded before texturing!\n");
233  return;
234  }
235  if (texturing.TextureMesh(mesh, *filtered_trajectory,
236  options_.textured_file_path)) {
237  std::cout << StringPrintf("Save textured mesh to %s successfully!\n",
238  options_.textured_file_path.c_str());
239  } else {
240  std::cerr << "WARNING: " << StringPrintf("Texturing reconstruction failed!\n");
241  }
// NOTE(review): inner line 242 is missing from this extraction — judging by
// the Thread API cross-references (GetTimer/PrintMinutes) it was likely
// `GetTimer().PrintMinutes();` — confirm against the repository.
243 }
244 
// Builds the pinhole camera parameters (texture file, extrinsic, intrinsic)
// for one registered image and stores them in
// camera_trajectory_->parameters_[index]. Returns false for camera models
// whose distortion cannot be represented as a plain pinhole.
// Runs on a thread-pool worker, but each task only writes its own slot.
// NOTE(review): inner lines 254 (the JoinPaths/GetFileParentDirectory call
// completing `target_file_path`) and 278 (the ccGLMatrixd `extrinsic`
// constructed from `rotation`, presumably via FromEigenMatrix3) are missing
// from this extraction.
245 bool TexturingReconstruction::Texturing(const image_t image_id,
246  std::size_t index) {
247  const colmap::Image& image = reconstruction_.Image(image_id);
248  const colmap::Camera& camera = reconstruction_.Camera(image.CameraId());
249 
// NOTE(review): input_image_path is computed but never used below.
250  const std::string input_image_path = JoinPaths(image_path_, image.Name());
251  const std::string texture_file = JoinPaths("images", image.Name());
252 
// Mirror the source image next to the textured mesh if not already there.
253  std::string target_file_path =
255  options_.textured_file_path),
256  texture_file);
257  if (!ExistsFile(target_file_path) &&
258  ExistsFile(JoinPaths(output_path_, texture_file))) {
259  FileCopy(JoinPaths(output_path_, texture_file), target_file_path);
260  }
261 
262  // Check if workspace is available and has depth/normal maps
263  int workspace_image_idx = GetWorkspaceImageIdx(image_id);
264 
265  std::cout << StringPrintf(
266  "[Texturing] Processing image_id=%u, image_name='%s', "
267  "workspace_image_idx=%d\n",
268  image_id, image.Name().c_str(), workspace_image_idx);
269 
270  // Store workspace_image_idx for later use in visibility checking
271  // This will be used when filtering cameras during texture mapping
272 
273  auto& cameraParams = camera_trajectory_->parameters_[index];
274  cameraParams.texture_file_ = texture_file;
// InverseProjectionMatrix() gives the cam-to-world [R|t]; split into rotation
// and translation to build the GL-style extrinsic below.
275  const Eigen::Matrix3x4d proj_matrix = image.InverseProjectionMatrix();
276  Eigen::Matrix3d rotation = proj_matrix.leftCols<3>();
277  Eigen::Vector3d translation = proj_matrix.rightCols<1>();
279  extrinsic.setTranslation(translation.data());
280  cameraParams.extrinsic_ = ccGLMatrixd::ToEigenMatrix4(extrinsic);
281  std::string model_name = camera.ModelName();
282  // https://github.com/colmap/colmap/blob/dev/src/base/camera_models.h
283  if (model_name == "SIMPLE_PINHOLE" || model_name == "SIMPLE_RADIAL" ||
284  model_name == "SIMPLE_RADIAL_FISHEYE" || model_name == "RADIAL" ||
285  model_name == "RADIAL_FISHEYE") {
286  // Simple pinhole: f, cx, cy
287  cameraParams.intrinsic_.SetIntrinsics(
288  static_cast<int>(camera.Width()),
289  static_cast<int>(camera.Height()), camera.FocalLength(),
290  camera.FocalLength(), camera.PrincipalPointX(),
291  camera.PrincipalPointY());
292  } else if (model_name == "PINHOLE" || model_name == "OPENCV" ||
293  model_name == "OPENCV_FISHEYE" || model_name == "FULL_OPENCV" ||
294  model_name == "FOV" || model_name == "THIN_PRISM_FISHEYE") {
295  // Pinhole: fx, fy, cx, cy
296  cameraParams.intrinsic_.SetIntrinsics(
297  static_cast<int>(camera.Width()),
298  static_cast<int>(camera.Height()), camera.FocalLengthX(),
299  camera.FocalLengthY(), camera.PrincipalPointX(),
300  camera.PrincipalPointY());
301  // float fx = params[0];
302  // float fy = params[1];
303  // float dim_aspect = static_cast<float>(width) / height;
304  // float pixel_aspect = fy / fx;
305  // float img_aspect = dim_aspect * pixel_aspect;
306  // if (img_aspect < 1.0f) {
307  // camera_info.flen = fy / height;
308  // } else {
309  // camera_info.flen = fx / width;
310  // }
311  // camera_info.ppoint[0] = params[2] / width;
312  // camera_info.ppoint[1] = params[3] / height;
313  } else {
314  std::string msg =
315  "Unsupported camera model with texturing "
316  "detected! If possible, re-run the SfM reconstruction with the "
317  "SIMPLE_PINHOLE or the PINHOLE camera model (recommended). "
318  "Otherwise, use the undistortion step in Colmap to obtain "
319  "undistorted images and corresponding camera models without "
320  "radial "
321  "distortion.";
// NOTE(review): passing a non-literal as the StringPrintf format string is
// risky (any '%' in msg would be interpreted); StringPrintf("%s", msg.c_str())
// would be safer.
322  std::cerr << "ERROR: " << StringPrintf(msg.c_str()) << "\n";
323  return false;
324  }
325  return true;
326 }
327 
// TexturingOptions::Check() — validates option ranges with CHECK macros
// (util/logging.h): max_depth_error must be > 0 and min_normal_consistency
// must lie in [-1, 1].
// NOTE(review): the function signature line (inner line 328) is missing from
// this extraction.
329  CHECK_GT(max_depth_error, 0.0f);
330  CHECK_GE(min_normal_consistency, -1.0f);
331  CHECK_LE(min_normal_consistency, 1.0f);
// Only reached when every CHECK passed.
332  return true;
333 }
334 
// TexturingOptions::Print() — dumps each option as "  name: value" via a
// local stringizing macro that is #undef'ed immediately after use.
// NOTE(review): the signature line (inner 335) and the PrintOption(...)
// invocations (inner 339-348) are missing from this extraction; only the
// macro definition, the heading, and the #undef survive.
336 #define PrintOption(option) \
337  std::cout << " " << #option ": " << option << std::endl
338  PrintHeading2("TexturingOptions");
349 #undef PrintOption
350 }
351 
// Maps a reconstruction image_id to that image's index inside the MVS
// workspace model. Returns -1 when the workspace is absent, the image name
// is unknown to the model, or the resolved index is out of range — callers
// treat -1 as "no depth/normal maps available".
// NOTE(review): inner lines 366, 376 and 399 are missing from this
// extraction; the dangling StringPrintf argument lists below indicate they
// were debug-print call openers (e.g. std::cout << StringPrintf( or
// CVLog::PrintDebug) — confirm against the repository.
352 int TexturingReconstruction::GetWorkspaceImageIdx(
353  const image_t image_id) const {
354  if (!workspace_) {
355  std::cerr << "WARNING: " << StringPrintf(
356  "[GetWorkspaceImageIdx] workspace_ is null for image_id=%u\n",
357  image_id);
358  return -1;
359  }
360 
361  const colmap::Image& image = reconstruction_.Image(image_id);
362  const std::string& image_name = image.Name();
363 
364  const auto& model = workspace_->GetModel();
365 
367  "[GetWorkspaceImageIdx] Looking up image_name='%s' (image_id=%u) "
368  "in workspace model (model has %zu images)\n",
369  image_name.c_str(), image_id, model.images.size());
370 
371  // GetImageIdx uses CHECK_GT and .at() which throws exception if not found
372  // Use try-catch to safely handle the case when image doesn't exist
373  try {
374  int workspace_image_idx = model.GetImageIdx(image_name);
375 
377  "[GetWorkspaceImageIdx] Found workspace_image_idx=%d for "
378  "image_name='%s'\n",
379  workspace_image_idx, image_name.c_str());
380 
381  // Validate the index is within valid range
382  if (workspace_image_idx < 0) {
383  std::cerr << "WARNING: " << StringPrintf(
384  "[GetWorkspaceImageIdx] Invalid negative "
385  "workspace_image_idx=%d for image_name='%s'\n",
386  workspace_image_idx, image_name.c_str());
387  return -1;
388  }
389 
390  if (static_cast<size_t>(workspace_image_idx) >= model.images.size()) {
391  std::cerr << "WARNING: " << StringPrintf(
392  "[GetWorkspaceImageIdx] workspace_image_idx=%d >= "
393  "model.images.size()=%zu for image_name='%s'\n",
394  workspace_image_idx, model.images.size(),
395  image_name.c_str());
396  return -1;
397  }
398 
400  "[GetWorkspaceImageIdx] Successfully mapped image_name='%s' -> "
401  "workspace_image_idx=%d\n",
402  image_name.c_str(), workspace_image_idx);
403  return workspace_image_idx;
404  } catch (const std::exception& e) {
405  // Image not found in workspace model or index out of range
406  std::cerr << "WARNING: " << StringPrintf(
407  "[GetWorkspaceImageIdx] Exception when looking up "
408  "image_name='%s': %s\n",
409  image_name.c_str(), e.what());
410  return -1;
411  }
412 }
413 
414 bool TexturingReconstruction::IsPointVisible(const Eigen::Vector3d& point3d,
415  const Image& image,
416  const Camera& camera,
417  int workspace_image_idx) const {
418  if (!options_.use_depth_normal_maps || !workspace_ ||
419  workspace_image_idx < 0) {
420  // Without depth map, assume visible if projection is valid
421  return true;
422  }
423 
424  // Validate index is within model range before accessing cache
425  const auto& model = workspace_->GetModel();
426  if (static_cast<size_t>(workspace_image_idx) >= model.images.size()) {
427  return true; // Invalid index, assume visible
428  }
429 
430  if (!workspace_->HasDepthMap(workspace_image_idx)) {
431  return true;
432  }
433 
434  try {
435  const auto& depth_map = workspace_->GetDepthMap(workspace_image_idx);
436 
437  // Project 3D point to image coordinates
438  // First transform to camera coordinates: [R|t] * [X; Y; Z; 1]
439  const Eigen::Matrix3x4d world_to_cam =
440  image.ProjectionMatrix(); // [R|t]
441  Eigen::Vector4d point_homogeneous(point3d.x(), point3d.y(), point3d.z(),
442  1.0);
443  Eigen::Vector3d cam_coords = world_to_cam * point_homogeneous;
444 
445  // Check if point is behind camera
446  if (cam_coords.z() <= 0) {
447  return false;
448  }
449 
450  // Build intrinsic matrix K
451  Eigen::Matrix3d K = Eigen::Matrix3d::Identity();
452  K(0, 0) = camera.MeanFocalLength();
453  K(1, 1) = camera.MeanFocalLength();
454  K(0, 2) = camera.PrincipalPointX();
455  K(1, 2) = camera.PrincipalPointY();
456 
457  // Project to pixel coordinates: K * [x_cam; y_cam; z_cam]
458  Eigen::Vector3d proj = K * cam_coords;
459  const double u = proj.x() / proj.z();
460  const double v = proj.y() / proj.z();
461 
462  // Check if projection is within image bounds
463  if (u < 0 || u >= camera.Width() || v < 0 || v >= camera.Height()) {
464  return false;
465  }
466 
467  const size_t row = static_cast<size_t>(std::round(v));
468  const size_t col = static_cast<size_t>(std::round(u));
469 
470  // Check if depth map dimensions match
471  if (row >= depth_map.GetHeight() || col >= depth_map.GetWidth()) {
472  return false;
473  }
474 
475  const float depth_map_value = depth_map.Get(row, col);
476  const float point_depth = static_cast<float>(proj(2));
477 
478  // Check if depth is valid (positive)
479  if (depth_map_value <= 0) {
480  return false;
481  }
482 
483  // Check depth consistency: point depth should be close to depth map
484  // value
485  const float depth_error =
486  std::abs(point_depth - depth_map_value) / depth_map_value;
487  return depth_error <= options_.max_depth_error;
488  } catch (const std::exception&) {
489  // Error accessing depth map, assume visible
490  return true;
491  }
492 }
493 
494 float TexturingReconstruction::ComputeViewQuality(
495  const Eigen::Vector3d& point3d,
496  const Eigen::Vector3d& face_normal,
497  const Image& image,
498  const Camera& camera,
499  int workspace_image_idx) const {
500  if (!options_.use_depth_normal_maps || !workspace_ ||
501  workspace_image_idx < 0) {
502  // Without normal map, use viewing angle as quality measure
503  const Eigen::Vector3d camera_pos = image.ProjectionCenter();
504  const Eigen::Vector3d view_dir = (point3d - camera_pos).normalized();
505  const double cos_angle = face_normal.normalized().dot(view_dir);
506  return static_cast<float>(std::max(0.0, cos_angle));
507  }
508 
509  // Validate index is within model range before accessing cache
510  const auto& model = workspace_->GetModel();
511  if (static_cast<size_t>(workspace_image_idx) >= model.images.size()) {
512  // Invalid index, use fallback
513  const Eigen::Vector3d camera_pos = image.ProjectionCenter();
514  const Eigen::Vector3d view_dir = (point3d - camera_pos).normalized();
515  const double cos_angle = face_normal.normalized().dot(view_dir);
516  return static_cast<float>(std::max(0.0, cos_angle));
517  }
518 
519  if (!workspace_->HasNormalMap(workspace_image_idx)) {
520  // Fallback to viewing angle
521  const Eigen::Vector3d camera_pos = image.ProjectionCenter();
522  const Eigen::Vector3d view_dir = (point3d - camera_pos).normalized();
523  const double cos_angle = face_normal.normalized().dot(view_dir);
524  return static_cast<float>(std::max(0.0, cos_angle));
525  }
526 
527  try {
528  const auto& normal_map = workspace_->GetNormalMap(workspace_image_idx);
529 
530  // Project 3D point to image coordinates
531  // First transform to camera coordinates: [R|t] * [X; Y; Z; 1]
532  const Eigen::Matrix3x4d world_to_cam =
533  image.ProjectionMatrix(); // [R|t]
534  Eigen::Vector4d point_homogeneous(point3d.x(), point3d.y(), point3d.z(),
535  1.0);
536  Eigen::Vector3d cam_coords = world_to_cam * point_homogeneous;
537 
538  if (cam_coords.z() <= 0) {
539  return 0.0f;
540  }
541 
542  // Build intrinsic matrix K
543  Eigen::Matrix3d K = Eigen::Matrix3d::Identity();
544  K(0, 0) = camera.MeanFocalLength();
545  K(1, 1) = camera.MeanFocalLength();
546  K(0, 2) = camera.PrincipalPointX();
547  K(1, 2) = camera.PrincipalPointY();
548 
549  // Project to pixel coordinates: K * [x_cam; y_cam; z_cam]
550  Eigen::Vector3d proj = K * cam_coords;
551  const double u = proj.x() / proj.z();
552  const double v = proj.y() / proj.z();
553 
554  if (u < 0 || u >= camera.Width() || v < 0 || v >= camera.Height()) {
555  return 0.0f;
556  }
557 
558  const size_t row = static_cast<size_t>(std::round(v));
559  const size_t col = static_cast<size_t>(std::round(u));
560 
561  if (row >= normal_map.GetHeight() || col >= normal_map.GetWidth()) {
562  return 0.0f;
563  }
564 
565  // Get normal from normal map (in camera coordinate system)
566  Eigen::Vector3d normal_map_normal(normal_map.Get(row, col, 0),
567  normal_map.Get(row, col, 1),
568  normal_map.Get(row, col, 2));
569 
570  // Transform face normal to camera coordinate system
571  // The rotation matrix transforms from world to camera coordinates
572  const Eigen::Matrix3d R = image.RotationMatrix();
573  const Eigen::Vector3d face_normal_cam = R * face_normal.normalized();
574 
575  // Compute consistency: cosine of angle between normals
576  const double cos_angle = face_normal_cam.normalized().dot(
577  normal_map_normal.normalized());
578 
579  return static_cast<float>(cos_angle);
580  } catch (const std::exception&) {
581  // Error accessing normal map, use fallback
582  const Eigen::Vector3d camera_pos = image.ProjectionCenter();
583  const Eigen::Vector3d view_dir = (point3d - camera_pos).normalized();
584  const double cos_angle = face_normal.normalized().dot(view_dir);
585  return static_cast<float>(std::max(0.0, cos_angle));
586  }
587 }
588 
589 std::shared_ptr<cloudViewer::camera::PinholeCameraTrajectory>
590 TexturingReconstruction::FilterCameraTrajectory(ccMesh* mesh) const {
591  auto filtered_trajectory =
592  std::make_shared<::cloudViewer::camera::PinholeCameraTrajectory>();
593 
594  // Sample mesh vertices for visibility testing
595  std::vector<Eigen::Vector3d> mesh_vertices;
596  if (!mesh || !mesh->getAssociatedCloud()) {
597  std::cerr << "ERROR: " << StringPrintf("Mesh is null or has no associated cloud!\n");
598  return filtered_trajectory;
599  }
600 
601  ccGenericPointCloud* generic_cloud = mesh->getAssociatedCloud();
602  ccPointCloud* cloud = ccHObjectCaster::ToPointCloud(generic_cloud);
603  int sample_step = 1; // Initialize for later use
604  if (cloud && cloud->size() > 0) {
605  // Sample vertices (every Nth vertex to speed up testing)
606  // IMPORTANT: Convert from local to global coordinates for visibility
607  // testing Camera transforms are in global coordinate system
608  const unsigned point_count = cloud->size();
609  sample_step = std::max(1, static_cast<int>(point_count / 1000));
610  mesh_vertices.reserve(point_count / sample_step);
611 
612  bool mesh_is_shifted = cloud->isShifted();
613  for (unsigned i = 0; i < point_count; i += sample_step) {
614  const CCVector3* pt = cloud->getPoint(i);
615  if (mesh_is_shifted) {
616  // Convert local to global coordinates
617  CCVector3d pt_global = cloud->toGlobal3d(*pt);
618  mesh_vertices.emplace_back(pt_global.x, pt_global.y,
619  pt_global.z);
620  } else {
621  mesh_vertices.emplace_back(pt->x, pt->y, pt->z);
622  }
623  }
624 
625  if (options_.verbose) {
626  std::cout << StringPrintf(
627  "Sampled %zu vertices from mesh for visibility testing\n",
628  mesh_vertices.size());
629  }
630  } else {
631  if (options_.verbose) {
632  std::cerr << "WARNING: " << StringPrintf("Mesh has no valid vertices for visibility testing\n");
633  }
634  }
635 
636  for (size_t i = 0; i < camera_trajectory_->parameters_.size(); ++i) {
637  const auto& cameraParams = camera_trajectory_->parameters_[i];
638 
639  // Find corresponding image_id
640  image_t image_id = kInvalidImageId;
641  if (image_ids_.empty()) {
642  if (i < reconstruction_.RegImageIds().size()) {
643  image_id = reconstruction_.RegImageIds().at(i);
644  }
645  } else {
646  if (i < image_ids_.size()) {
647  image_id = image_ids_[i];
648  }
649  }
650 
651  if (image_id == kInvalidImageId) {
652  continue;
653  }
654 
655  const colmap::Image& image = reconstruction_.Image(image_id);
656  const colmap::Camera& camera = reconstruction_.Camera(image.CameraId());
657  int workspace_image_idx = GetWorkspaceImageIdx(image_id);
658 
659  // Check if camera has valid depth/normal maps
660  // GetWorkspaceImageIdx now safely returns -1 if image not found or
661  // invalid
662  bool has_valid_maps = false;
663  if (workspace_image_idx >= 0 && workspace_) {
664  has_valid_maps = workspace_->HasDepthMap(workspace_image_idx) &&
665  workspace_->HasNormalMap(workspace_image_idx);
666  }
667 
668  // If using depth/normal maps, perform visibility testing
669  bool should_include = true;
670  if (options_.use_depth_normal_maps) {
671  if (!has_valid_maps) {
672  should_include = false;
673  if (options_.verbose) {
674  std::cerr << "WARNING: " << StringPrintf(
675  "Excluding camera %s from texturing (no "
676  "depth/normal maps)\n",
677  image.Name().c_str());
678  }
679  } else if (!mesh_vertices.empty()) {
680  // Test visibility on sampled mesh vertices
681  size_t visible_count = 0;
682  size_t quality_count = 0;
683 
684  // Get vertex normals if available
685  bool has_normals = cloud->hasNormals();
686 
687  for (size_t idx = 0; idx < mesh_vertices.size(); ++idx) {
688  const auto& vertex = mesh_vertices[idx];
689  // Test visibility using depth map
690  if (IsPointVisible(vertex, image, camera,
691  workspace_image_idx)) {
692  visible_count++;
693 
694  // Get actual vertex normal if available
695  Eigen::Vector3d face_normal(0, 0, 1); // Default normal
696  if (has_normals) {
697  // Map back to original vertex index
698  size_t vertex_idx = idx * sample_step;
699  if (vertex_idx < cloud->size()) {
700  const CCVector3& n =
701  cloud->getPointNormal(vertex_idx);
702  face_normal = Eigen::Vector3d(n.x, n.y, n.z);
703  // Convert to global coordinate system if needed
704  if (cloud->isShifted()) {
705  // Normals don't need shift/scale, but may
706  // need rotation For now, use as-is
707  // (assuming normals are already in global)
708  }
709  }
710  }
711 
712  float quality =
713  ComputeViewQuality(vertex, face_normal, image,
714  camera, workspace_image_idx);
715 
716  // Check if quality meets threshold
717  // Use absolute value since normals might be oriented
718  // differently
719  if (std::abs(quality) >=
720  options_.min_normal_consistency) {
721  quality_count++;
722  }
723  }
724  }
725 
726  // Include camera if it can see enough vertices with good
727  // quality
728  const double visibility_ratio =
729  static_cast<double>(visible_count) /
730  mesh_vertices.size();
731  const double quality_ratio =
732  visible_count > 0 ? static_cast<double>(quality_count) /
733  visible_count
734  : 0.0;
735 
736  const double min_visibility_ratio =
737  0.1; // At least 10% of vertices visible
738  // Lower quality ratio threshold: at least 5% of visible
739  // vertices have good quality This is more lenient since normal
740  // consistency can vary significantly
741  const double min_quality_ratio = 0.05;
742 
743  should_include = visibility_ratio >= min_visibility_ratio &&
744  quality_ratio >= min_quality_ratio;
745 
746  if (options_.verbose) {
747  if (should_include) {
748  std::cout << StringPrintf(
749  "Camera %s: visibility=%.2f%%, quality=%.2f%% "
750  "(included)\n",
751  image.Name().c_str(), visibility_ratio * 100.0,
752  quality_ratio * 100.0);
753  } else {
754  std::cout << "WARNING: " << StringPrintf(
755  "Camera %s: visibility=%.2f%%, quality=%.2f%% "
756  "(excluded)\n",
757  image.Name().c_str(), visibility_ratio * 100.0,
758  quality_ratio * 100.0);
759  }
760  }
761  }
762  }
763 
764  if (should_include) {
765  // Validate camera parameters before adding
766  if (!cameraParams.intrinsic_.IsValid()) {
767  if (options_.verbose) {
768  std::cerr << "WARNING: " << StringPrintf(
769  "Excluding camera %s from texturing (invalid "
770  "intrinsic parameters)\n",
771  image.Name().c_str());
772  }
773  continue;
774  }
775 
776  // Check if extrinsic matrix is valid (no NaN or Inf)
777  bool extrinsic_valid = true;
778  for (int row = 0; row < 4; ++row) {
779  for (int col = 0; col < 4; ++col) {
780  const double val = cameraParams.extrinsic_(row, col);
781  if (!std::isfinite(val)) {
782  extrinsic_valid = false;
783  break;
784  }
785  }
786  if (!extrinsic_valid) break;
787  }
788 
789  if (!extrinsic_valid) {
790  if (options_.verbose) {
791  std::cerr << "WARNING: " << StringPrintf(
792  "Excluding camera %s from texturing (invalid "
793  "extrinsic matrix)\n",
794  image.Name().c_str());
795  }
796  continue;
797  }
798 
799  filtered_trajectory->parameters_.push_back(cameraParams);
800  }
801  }
802 
803  // Final validation: ensure we have at least one valid camera
804  if (filtered_trajectory->parameters_.empty()) {
805  std::cerr << "WARNING: " << StringPrintf(
806  "No valid cameras found after filtering! Using original "
807  "trajectory.\n");
808  return camera_trajectory_;
809  }
810 
811  return filtered_trajectory;
812 }
813 
814 } // namespace colmap
std::shared_ptr< core::Tensor > image
int size
bool has_normals
Eigen::Matrix3d rotation
Definition: VoxelGridIO.cpp:27
static bool PrintDebug(const char *format,...)
Same as Print, but works only in Debug mode.
Definition: CVLog.cpp:153
Type y
Definition: CVGeom.h:137
Type x
Definition: CVGeom.h:137
Type z
Definition: CVGeom.h:137
static ccGLMatrixTpl< double > FromEigenMatrix3(const Eigen::Matrix< double, 3, 3 > &mat)
void setTranslation(const Vector3Tpl< float > &Tr)
Sets translation (float version)
static Eigen::Matrix< double, 4, 4 > ToEigenMatrix4(const ccGLMatrixTpl< float > &mat)
Double version of ccGLMatrixTpl.
Definition: ecvGLMatrix.h:56
A 3D cloud interface with associated features (color, normals, octree, etc.)
static ccPointCloud * ToPointCloud(ccHObject *obj, bool *isLockedVertices=nullptr)
Converts current object to 'equivalent' ccPointCloud.
Triangular mesh.
Definition: ecvMesh.h:35
virtual unsigned size() const override
Returns the number of triangles.
Definition: ecvMesh.cpp:2143
ccGenericPointCloud * getAssociatedCloud() const override
Returns the vertices cloud.
Definition: ecvMesh.h:143
bool hasNormals() const override
Returns whether normals are enabled or not.
Definition: ecvMesh.cpp:225
bool computeNormals(bool perVertex)
Computes normals.
Definition: ecvMesh.cpp:242
A 3D cloud and its associated features (color, normals, scalar fields, etc.)
bool hasNormals() const override
Returns whether normals are enabled or not.
const CCVector3 & getPointNormal(unsigned pointIndex) const override
Returns normal corresponding to a given point.
CCVector3d toGlobal3d(const Vector3Tpl< T > &Plocal) const
Returns the point back-projected into the original coordinates system.
bool isShifted() const
Returns whether the cloud is shifted or not.
virtual unsigned size() const =0
Returns the number of points.
unsigned size() const override
Definition: PointCloudTpl.h:38
const CCVector3 * getPoint(unsigned index) const override
double FocalLengthY() const
Definition: camera.cc:121
std::string ModelName() const
Definition: camera.cc:49
double FocalLength() const
Definition: camera.cc:109
double FocalLengthX() const
Definition: camera.cc:115
double PrincipalPointX() const
Definition: camera.cc:146
size_t Width() const
Definition: camera.h:160
double PrincipalPointY() const
Definition: camera.cc:152
size_t Height() const
Definition: camera.h:162
const std::string & Name() const
Definition: image.h:242
size_t NumRegImages() const
const class Image & Image(const image_t image_id) const
const class Camera & Camera(const camera_t camera_id) const
const std::vector< image_t > & RegImageIds() const
TexturingReconstruction(const TexturingOptions &options, const Reconstruction &reconstruction, const std::string &image_path, const std::string &output_path, const std::vector< image_t > &image_ids=std::vector< image_t >())
const Timer & GetTimer() const
Definition: threading.cc:154
bool IsStopped()
Definition: threading.cc:97
void PrintMinutes() const
Definition: timer.cc:93
const double * e
Matrix< double, 3, 4 > Matrix3x4d
Definition: types.h:39
bool AutoReadMesh(const std::string &filename, ccMesh &mesh, const ReadTriangleMeshOptions &params={})
Definition: AutoIO.cpp:306
std::string GetFileParentDirectory(const std::string &filename)
Definition: FileSystem.cpp:314
void PrintHeading2(const std::string &heading)
Definition: misc.cc:231
bool ExistsFile(const std::string &path)
Definition: misc.cc:100
void CreateDirIfNotExists(const std::string &path)
Definition: misc.cc:112
std::string JoinPaths(T const &... paths)
Definition: misc.h:128
uint32_t image_t
Definition: types.h:61
void PrintHeading1(const std::string &heading)
Definition: misc.cc:225
std::string StringPrintf(const char *format,...)
Definition: string.cc:131
const image_t kInvalidImageId
Definition: types.h:76
void FileCopy(const std::string &src_path, const std::string &dst_path, CopyType type)
Definition: misc.cc:85
#define PrintOption(option)