ACloudViewer 3.9.4
A Modern Library for 3D Data Processing
pose.cc
Go to the documentation of this file.
1 // Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer.
9 //
10 // * Redistributions in binary form must reproduce the above copyright
11 // notice, this list of conditions and the following disclaimer in the
12 // documentation and/or other materials provided with the distribution.
13 //
14 // * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
15 // its contributors may be used to endorse or promote products derived
16 // from this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 // POSSIBILITY OF SUCH DAMAGE.
29 //
30 // Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
31 
32 #include "estimators/pose.h"
33 
34 #include "base/camera_models.h"
35 #include "base/cost_functions.h"
36 #include "base/essential_matrix.h"
37 #include "base/pose.h"
41 #include "util/matrix.h"
42 #include "util/misc.h"
43 #include "util/threading.h"
44 
45 namespace colmap {
46 namespace {
47 
// LO-RANSAC absolute-pose estimator: P3P as the minimal solver, EPnP for
// local optimization of the hypothesis on the inlier set.
48 typedef LORANSAC<P3PEstimator, EPNPEstimator> AbsolutePoseRANSAC;
49 
// Run one LO-RANSAC absolute-pose estimation for a single focal-length
// hypothesis. Used as a worker task by the focal-length search in
// EstimateAbsolutePose.
//
// @param camera               Input camera; copied and its focal length
//                             parameter(s) scaled before use.
// @param focal_length_factor  Multiplier applied to each focal-length
//                             parameter of the camera copy.
// @param points2D             2D image observations (pixel coordinates).
// @param points3D             Corresponding 3D world points.
// @param options              RANSAC options; max_error is in pixels and is
//                             converted to normalized units below.
// @param report               Output report (model, inlier mask, success).
50 void EstimateAbsolutePoseKernel(const Camera& camera,
51  const double focal_length_factor,
52  const std::vector<Eigen::Vector2d>& points2D,
53  const std::vector<Eigen::Vector3d>& points3D,
54  const RANSACOptions& options,
55  AbsolutePoseRANSAC::Report* report) {
56  // Scale the focal length by the given factor.
57  Camera scaled_camera = camera;
58  const std::vector<size_t>& focal_length_idxs = camera.FocalLengthIdxs();
59  for (const size_t idx : focal_length_idxs) {
60  scaled_camera.Params(idx) *= focal_length_factor;
61  }
62 
63  // Normalize image coordinates with current camera hypothesis.
64  std::vector<Eigen::Vector2d> points2D_N(points2D.size());
65  for (size_t i = 0; i < points2D.size(); ++i) {
66  points2D_N[i] = scaled_camera.ImageToWorld(points2D[i]);
67  }
68 
69  // Estimate pose for given focal length.
// The pixel-space threshold must be converted with the scaled camera so that
// the same image-space error is used for every focal-length hypothesis.
70  auto custom_options = options;
71  custom_options.max_error =
72  scaled_camera.ImageToWorldThreshold(options.max_error);
73  AbsolutePoseRANSAC ransac(custom_options);
74  *report = ransac.Estimate(points2D_N, points3D);
75 }
76 
77 } // namespace
78 
// NOTE(review): the opening signature line (original source line 79,
// `bool EstimateAbsolutePose(const AbsolutePoseEstimationOptions& options,`
// per the cross-reference index at the bottom of this listing) is missing
// from this extracted listing — restore it from the original file.
//
// Estimate the absolute camera pose (qvec, tvec) from 2D-3D correspondences.
// If options.estimate_focal_length is set, multiple focal-length factors are
// sampled and evaluated in parallel; the hypothesis with the most RANSAC
// inliers wins and the camera's focal length is scaled accordingly.
// Outputs num_inliers and inlier_mask; returns true on success.
80  const std::vector<Eigen::Vector2d>& points2D,
81  const std::vector<Eigen::Vector3d>& points3D,
82  Eigen::Vector4d* qvec,
83  Eigen::Vector3d* tvec,
84  Camera* camera,
85  size_t* num_inliers,
86  std::vector<char>* inlier_mask) {
87  options.Check();
88 
89  std::vector<double> focal_length_factors;
90  if (options.estimate_focal_length) {
91  // Generate focal length factors using a quadratic function,
92  // such that more samples are drawn for small focal lengths
93  focal_length_factors.reserve(options.num_focal_length_samples + 1);
94  const double fstep = 1.0 / options.num_focal_length_samples;
95  const double fscale =
// NOTE(review): the right-hand side of `fscale` (original source line 96,
// presumably `options.max_focal_length_ratio - options.min_focal_length_ratio`
// given the quadratic interpolation below — TODO confirm) is missing from
// this extracted listing.
97  for (double f = 0; f <= 1.0; f += fstep) {
98  focal_length_factors.push_back(options.min_focal_length_ratio +
99  fscale * f * f);
100  }
101  } else {
// Focal length assumed correct: evaluate only the identity factor.
102  focal_length_factors.reserve(1);
103  focal_length_factors.push_back(1);
104  }
105 
// One future/report slot per focal-length hypothesis; the aligned allocator
// is required because the report stores fixed-size Eigen types.
106  std::vector<std::future<void>> futures;
107  futures.resize(focal_length_factors.size());
108  std::vector<typename AbsolutePoseRANSAC::Report,
109  Eigen::aligned_allocator<typename AbsolutePoseRANSAC::Report>>
110  reports;
111  reports.resize(focal_length_factors.size());
112 
// Never spawn more threads than there are hypotheses to evaluate.
113  ThreadPool thread_pool(
114  std::min(options.num_threads,
115  static_cast<int>(focal_length_factors.size())));
116 
117  for (size_t i = 0; i < focal_length_factors.size(); ++i) {
118  futures[i] = thread_pool.AddTask(
119  EstimateAbsolutePoseKernel, *camera, focal_length_factors[i],
120  points2D, points3D, options.ransac_options, &reports[i]);
121  }
122 
123  double focal_length_factor = 0;
124  Eigen::Matrix3x4d proj_matrix;
125  *num_inliers = 0;
126  inlier_mask->clear();
127 
128  // Find best model among all focal lengths.
129  for (size_t i = 0; i < focal_length_factors.size(); ++i) {
// Wait for the worker task before reading its report.
130  futures[i].get();
// NOTE(review): this copies the whole report (including the inlier mask);
// `const auto&` would avoid the copy — confirm against upstream before
// changing.
131  const auto report = reports[i];
132  if (report.success && report.support.num_inliers > *num_inliers) {
133  *num_inliers = report.support.num_inliers;
134  proj_matrix = report.model;
135  *inlier_mask = report.inlier_mask;
136  focal_length_factor = focal_length_factors[i];
137  }
138  }
139 
140  if (*num_inliers == 0) {
141  return false;
142  }
143 
144  // Scale output camera with best estimated focal length.
145  if (options.estimate_focal_length && *num_inliers > 0) {
146  const std::vector<size_t>& focal_length_idxs =
147  camera->FocalLengthIdxs();
148  for (const size_t idx : focal_length_idxs) {
149  camera->Params(idx) *= focal_length_factor;
150  }
151  }
152 
153  // Extract pose parameters.
154  *qvec = RotationMatrixToQuaternion(proj_matrix.leftCols<3>());
155  *tvec = proj_matrix.rightCols<1>();
156 
// Guard against degenerate estimates producing NaNs.
157  if (IsNaN(*qvec) || IsNaN(*tvec)) {
158  return false;
159  }
160 
161  return true;
162 }
163 
// Estimate the relative pose between two calibrated views from normalized
// point correspondences, via RANSAC on the five-point essential-matrix
// solver followed by decomposition of the best model.
//
// @param ransac_options  RANSAC options (error threshold in normalized units).
// @param points1         Normalized image points in the first view.
// @param points2         Corresponding normalized points in the second view.
// @param qvec            Output rotation as a quaternion.
// @param tvec            Output translation (direction only; scale is
//                        unobservable from two views).
// @return Number of points triangulated by the cheirality check during
//         essential-matrix decomposition; 0 on failure.
164 size_t EstimateRelativePose(const RANSACOptions& ransac_options,
165  const std::vector<Eigen::Vector2d>& points1,
166  const std::vector<Eigen::Vector2d>& points2,
167  Eigen::Vector4d* qvec,
168  Eigen::Vector3d* tvec) {
169  RANSAC<EssentialMatrixFivePointEstimator> ransac(ransac_options);
170  const auto report = ransac.Estimate(points1, points2);
171 
172  if (!report.success) {
173  return 0;
174  }
175 
// Compact the inlier correspondences; the essential-matrix decomposition
// below only needs the inliers for the cheirality (sign) disambiguation.
176  std::vector<Eigen::Vector2d> inliers1(report.support.num_inliers);
177  std::vector<Eigen::Vector2d> inliers2(report.support.num_inliers);
178 
179  size_t j = 0;
180  for (size_t i = 0; i < points1.size(); ++i) {
181  if (report.inlier_mask[i]) {
182  inliers1[j] = points1[i];
183  inliers2[j] = points2[i];
184  j += 1;
185  }
186  }
187 
188  Eigen::Matrix3d R;
189 
// Decompose E into (R, t) and triangulate the inliers to select the
// configuration with points in front of both cameras.
190  std::vector<Eigen::Vector3d> points3D;
191  PoseFromEssentialMatrix(report.model, inliers1, inliers2, &R, tvec,
192  &points3D);
193 
194  *qvec = RotationMatrixToQuaternion(R);
195 
// Guard against degenerate decompositions producing NaNs.
196  if (IsNaN(*qvec) || IsNaN(*tvec)) {
197  return 0;
198  }
199 
200  return points3D.size();
201 }
202 
// NOTE(review): the opening signature line (original source line 203,
// `bool RefineAbsolutePose(const AbsolutePoseRefinementOptions& options,`
// per the cross-reference index at the bottom of this listing) is missing
// from this extracted listing — restore it from the original file.
//
// Refine an absolute pose (qvec, tvec) and optionally the camera intrinsics
// by nonlinear least squares (Ceres) over the inlier 2D-3D correspondences.
// The 3D points are held constant; a Cauchy loss downweights residual
// outliers. Returns true if the solver produced a usable solution.
204  const std::vector<char>& inlier_mask,
205  const std::vector<Eigen::Vector2d>& points2D,
206  const std::vector<Eigen::Vector3d>& points3D,
207  Eigen::Vector4d* qvec,
208  Eigen::Vector3d* tvec,
209  Camera* camera) {
210  CHECK_EQ(inlier_mask.size(), points2D.size());
211  CHECK_EQ(points2D.size(), points3D.size());
212  options.Check();
213 
// Ceres takes ownership of the loss function via AddResidualBlock.
214  ceres::LossFunction* loss_function =
215  new ceres::CauchyLoss(options.loss_function_scale);
216 
217  double* camera_params_data = camera->ParamsData();
218  double* qvec_data = qvec->data();
219  double* tvec_data = tvec->data();
220 
// Copy the 3D points so they can be registered as (constant) parameter
// blocks without mutating the caller's data.
221  std::vector<Eigen::Vector3d> points3D_copy = points3D;
222 
223  ceres::Problem problem;
224 
225  for (size_t i = 0; i < points2D.size(); ++i) {
226  // Skip outlier observations
227  if (!inlier_mask[i]) {
228  continue;
229  }
230 
231  ceres::CostFunction* cost_function = nullptr;
232 
// Instantiate the cost function for the camera's concrete model via the
// model-dispatch macro.
233  switch (camera->ModelId()) {
234 #define CAMERA_MODEL_CASE(CameraModel) \
235  case CameraModel::kModelId: \
236  cost_function = BundleAdjustmentCostFunction<CameraModel>::Create( \
237  points2D[i]); \
238  break;
239 
// NOTE(review): the `CAMERA_MODEL_SWITCH_CASES` macro invocation (original
// source line 240, per the index at the bottom of this listing) is missing
// from this extracted listing — without it the switch has no cases.
241 
242 #undef CAMERA_MODEL_CASE
243  }
244 
245  problem.AddResidualBlock(cost_function, loss_function, qvec_data,
246  tvec_data, points3D_copy[i].data(),
247  camera_params_data);
// The 3D points are observations here, not variables to optimize.
248  problem.SetParameterBlockConstant(points3D_copy[i].data());
249  }
250 
251  if (problem.NumResiduals() > 0) {
252  // Quaternion parameterization.
// The cost functions assume a unit quaternion; normalize before attaching
// the manifold that keeps it on the unit sphere during optimization.
253  *qvec = NormalizeQuaternion(*qvec);
254  SetQuaternionManifold(&problem, qvec_data);
255  // ceres::LocalParameterization* quaternion_parameterization =
256  // new ceres::QuaternionParameterization;
257  // problem.SetParameterization(qvec_data, quaternion_parameterization);
258 
259  // Camera parameterization.
260  if (!options.refine_focal_length && !options.refine_extra_params) {
// Nothing in the intrinsics is refined: freeze the whole block.
261  problem.SetParameterBlockConstant(camera->ParamsData());
262  } else {
263  // Always set the principal point as fixed.
264  std::vector<int> camera_params_const;
265  const std::vector<size_t>& principal_point_idxs =
266  camera->PrincipalPointIdxs();
267  camera_params_const.insert(camera_params_const.end(),
268  principal_point_idxs.begin(),
269  principal_point_idxs.end());
270 
271  if (!options.refine_focal_length) {
272  const std::vector<size_t>& focal_length_idxs =
273  camera->FocalLengthIdxs();
274  camera_params_const.insert(camera_params_const.end(),
275  focal_length_idxs.begin(),
276  focal_length_idxs.end());
277  }
278 
279  if (!options.refine_extra_params) {
280  const std::vector<size_t>& extra_params_idxs =
281  camera->ExtraParamsIdxs();
282  camera_params_const.insert(camera_params_const.end(),
283  extra_params_idxs.begin(),
284  extra_params_idxs.end());
285  }
286 
287  if (camera_params_const.size() == camera->NumParams()) {
// Every parameter ended up constant: freezing the whole block is cheaper
// (and a subset manifold over all indices would be invalid).
288  problem.SetParameterBlockConstant(camera->ParamsData());
289  } else {
290  SetSubsetManifold(static_cast<int>(camera->NumParams()),
291  camera_params_const, &problem,
292  camera->ParamsData());
293  // ceres::SubsetParameterization* camera_params_parameterization
294  // =
295  // new ceres::SubsetParameterization(
296  // static_cast<int>(camera->NumParams()),
297  // camera_params_const);
298  // problem.SetParameterization(camera->ParamsData(),
299  // camera_params_parameterization);
300  }
301  }
302  }
303 
304  ceres::Solver::Options solver_options;
305  solver_options.gradient_tolerance = options.gradient_tolerance;
306  solver_options.max_num_iterations = options.max_num_iterations;
307  solver_options.linear_solver_type = ceres::DENSE_QR;
308 
309  // The overhead of creating threads is too large.
310  solver_options.num_threads = 1;
311 #if CERES_VERSION_MAJOR < 2
312  solver_options.num_linear_solver_threads = 1;
313 #endif // CERES_VERSION_MAJOR
314 
315  ceres::Solver::Summary summary;
316  ceres::Solve(solver_options, &problem, &summary);
317 
// Terminate the solver's progress output with a newline if it was enabled.
318  if (solver_options.minimizer_progress_to_stdout) {
319  std::cout << std::endl;
320  }
321 
322  if (options.print_summary) {
323  PrintHeading2("Pose refinement report");
324  PrintSolverSummary(summary);
325  }
326 
327  return summary.IsSolutionUsable();
328 }
329 
// Refine a relative pose (qvec, tvec) by minimizing the two-view cost from
// RelativePoseCostFunction over all given correspondences, with a Cauchy
// loss to downweight outliers. The translation is constrained to the unit
// sphere because its scale is unobservable from two views.
// Returns true if the solver produced a usable solution.
//
// @param options  Ceres solver options used as-is.
// @param points1  Normalized image points in the first view.
// @param points2  Corresponding normalized points in the second view.
// @param qvec     In/out rotation quaternion (normalized on entry below).
// @param tvec     In/out translation direction (unit norm maintained).
330 bool RefineRelativePose(const ceres::Solver::Options& options,
331  const std::vector<Eigen::Vector2d>& points1,
332  const std::vector<Eigen::Vector2d>& points2,
333  Eigen::Vector4d* qvec,
334  Eigen::Vector3d* tvec) {
335  CHECK_EQ(points1.size(), points2.size());
336 
337  // CostFunction assumes unit quaternions.
338  *qvec = NormalizeQuaternion(*qvec);
339 
340  const double kMaxL2Error = 1.0;
// Ceres takes ownership of the loss function via AddResidualBlock.
341  ceres::LossFunction* loss_function = new ceres::CauchyLoss(kMaxL2Error);
342 
343  ceres::Problem problem;
344 
// One residual block per correspondence; all share qvec and tvec.
345  for (size_t i = 0; i < points1.size(); ++i) {
346  ceres::CostFunction* cost_function =
347  RelativePoseCostFunction::Create(points1[i], points2[i]);
348  problem.AddResidualBlock(cost_function, loss_function, qvec->data(),
349  tvec->data());
350  }
351 
352  // ceres::LocalParameterization* quaternion_parameterization =
353  // new ceres::QuaternionParameterization;
354  // problem.SetParameterization(qvec->data(), quaternion_parameterization);
355 
356  // ceres::HomogeneousVectorParameterization* homogeneous_parameterization =
357  // new ceres::HomogeneousVectorParameterization(3);
358  // problem.SetParameterization(tvec->data(), homogeneous_parameterization);
359 
// Keep the quaternion unit-norm and the translation on the unit sphere.
360  SetQuaternionManifold(&problem, qvec->data());
361  SetSphereManifold<3>(&problem, tvec->data());
362 
363  ceres::Solver::Summary summary;
364  ceres::Solve(options, &problem, &summary);
365 
366  return summary.IsSolutionUsable();
367 }
368 
369 } // namespace colmap
#define CAMERA_MODEL_SWITCH_CASES
const std::vector< double > & Params() const
Definition: camera.h:176
int ModelId() const
Definition: camera.h:158
const std::vector< size_t > & PrincipalPointIdxs() const
Definition: camera.cc:67
size_t NumParams() const
Definition: camera.h:174
const std::vector< size_t > & ExtraParamsIdxs() const
Definition: camera.cc:71
const std::vector< size_t > & FocalLengthIdxs() const
Definition: camera.cc:63
const double * ParamsData() const
Definition: camera.h:184
Report Estimate(const std::vector< typename Estimator::X_t > &X, const std::vector< typename Estimator::Y_t > &Y)
Definition: ransac.h:159
static ceres::CostFunction * Create(const Eigen::Vector2d &x1, const Eigen::Vector2d &x2)
auto AddTask(func_t &&f, args_t &&... args) -> std::future< typename std::result_of< func_t(args_t...)>::type >
Definition: threading.h:299
GraphType data
Definition: graph_cut.cc:138
Matrix< double, 3, 4 > Matrix3x4d
Definition: types.h:39
QTextStream & endl(QTextStream &stream)
Definition: QtCompat.h:718
void Solve(const Tensor &A, const Tensor &B, Tensor &X)
Solve AX = B with LU decomposition. A is a square matrix.
Definition: Solve.cpp:22
void PrintHeading2(const std::string &heading)
Definition: misc.cc:231
size_t EstimateRelativePose(const RANSACOptions &ransac_options, const std::vector< Eigen::Vector2d > &points1, const std::vector< Eigen::Vector2d > &points2, Eigen::Vector4d *qvec, Eigen::Vector3d *tvec)
Definition: pose.cc:164
Eigen::Vector4d NormalizeQuaternion(const Eigen::Vector4d &qvec)
Definition: pose.cc:82
void PrintSolverSummary(const ceres::Solver::Summary &summary)
bool EstimateAbsolutePose(const AbsolutePoseEstimationOptions &options, const std::vector< Eigen::Vector2d > &points2D, const std::vector< Eigen::Vector3d > &points3D, Eigen::Vector4d *qvec, Eigen::Vector3d *tvec, Camera *camera, size_t *num_inliers, std::vector< char > *inlier_mask)
Definition: pose.cc:79
Eigen::Vector4d RotationMatrixToQuaternion(const Eigen::Matrix3d &rot_mat)
Definition: pose.cc:70
void SetQuaternionManifold(ceres::Problem *problem, double *quat_xyzw)
Definition: manifold.h:19
bool RefineRelativePose(const ceres::Solver::Options &options, const std::vector< Eigen::Vector2d > &points1, const std::vector< Eigen::Vector2d > &points2, Eigen::Vector4d *qvec, Eigen::Vector3d *tvec)
Definition: pose.cc:330
bool RefineAbsolutePose(const AbsolutePoseRefinementOptions &options, const std::vector< char > &inlier_mask, const std::vector< Eigen::Vector2d > &points2D, const std::vector< Eigen::Vector3d > &points3D, Eigen::Vector4d *qvec, Eigen::Vector3d *tvec, Camera *camera)
Definition: pose.cc:203
bool IsNaN(const float x)
Definition: math.h:160
void PoseFromEssentialMatrix(const Eigen::Matrix3d &E, const std::vector< Eigen::Vector2d > &points1, const std::vector< Eigen::Vector2d > &points2, Eigen::Matrix3d *R, Eigen::Vector3d *t, std::vector< Eigen::Vector3d > *points3D)
void SetSubsetManifold(int size, const std::vector< int > &constant_params, ceres::Problem *problem, double *params)
Definition: manifold.h:29