34 #include <unordered_set>
// NOTE(review): fragmentary extract of ExtractInlierMatches — the function
// header, the output-vector declaration, and the mask test / counter `j`
// lines are elided in this listing. Visible intent: walk all input matches
// and copy the i-th match into slot `j` of the compacted inlier vector,
// then return it. Confirm against the full source before editing.
54 const size_t num_inliers,
55 const std::vector<char>& inlier_mask) {
58 for (
size_t i = 0; i < matches.size(); ++i) {
// Copies the surviving match into the next compacted slot (the mask check
// and `++j` presumably sit on the elided lines 59/61 — TODO confirm).
60 inlier_matches[j] = matches[i];
64 return inlier_matches;
// NOTE(review): fragmentary extract of ExtractOutlierMatches — returns all
// matches that are NOT in `inlier_matches`. Several lines (function header,
// `outlier_matches` declaration, the count()==0 condition) are elided.
// Sanity check: the inliers must be a subset of the matches.
69 CHECK_GE(matches.size(), inlier_matches.size());
// Build a hash set of inlier index pairs for O(1) membership tests.
// NOTE(review): std::unordered_set over std::pair requires a hash
// specialization; presumably one is provided elsewhere in the project for
// std::pair<point2D_t, point2D_t> — verify.
71 std::unordered_set<std::pair<point2D_t, point2D_t>> inlier_matches_set;
72 inlier_matches_set.reserve(inlier_matches.size());
73 for (
const auto& match : inlier_matches) {
74 inlier_matches_set.emplace(match.point2D_idx1, match.point2D_idx2);
// Everything in `matches` but not in the set is an outlier; reserve the
// exact remaining capacity up front to avoid reallocation.
78 outlier_matches.reserve(matches.size() - inlier_matches.size());
80 for (
const auto& match : matches) {
// Elided condition: presumably `count(...) == 0` (keep only non-inliers).
81 if (inlier_matches_set.count(
83 outlier_matches.push_back(match);
87 return outlier_matches;
90 inline bool IsImagePointInBoundingBox(
const Eigen::Vector2d& point,
91 const double minx,
const double maxx,
92 const double miny,
const double maxy) {
93 return point.x() >= minx && point.x() <= maxx && point.y() >= miny &&
// NOTE(review): fragmentary extract of a geometry-inversion routine (swaps
// the roles of image 1 and image 2). The function header and several lines
// (including the pose-inversion call, presumably InvertPose(orig_qvec,
// orig_tvec, &qvec, &tvec) per the elided lines) are missing here — confirm.
// F and E invert by transposition when the two views are exchanged.
100 F.transposeInPlace();
101 E.transposeInPlace();
// The homography inverts by matrix inversion; .eval() forces evaluation
// into a temporary so the self-assignment does not alias.
102 H =
H.inverse().eval();
// Keep copies of the current pose before overwriting it with its inverse.
104 const Eigen::Vector4d orig_qvec =
qvec;
105 const Eigen::Vector3d orig_tvec =
tvec;
// Swap the feature indices of every inlier match (loop header elided).
109 std::swap(match.point2D_idx1, match.point2D_idx2);
// NOTE(review): isolated parameter lines of another function's signature
// (presumably the Estimate(...) dispatcher, per the listing's prototype
// index); the rest of the definition is elided from this extract.
114 const std::vector<Eigen::Vector2d>& points1,
116 const std::vector<Eigen::Vector2d>& points2,
// NOTE(review): fragmentary extract of EstimateMultiple — iteratively
// estimates two-view geometries, removing each configuration's inliers from
// the remaining matches until estimation degenerates, then aggregates the
// results. Loop headers, thresholds, and several statements are elided.
127 const Camera& camera1,
const std::vector<Eigen::Vector2d>& points1,
128 const Camera& camera2,
const std::vector<Eigen::Vector2d>& points2,
// Collects one geometry per iteration of the (elided) estimation loop.
131 std::vector<TwoViewGeometry> two_view_geometries;
134 two_view_geometry.
Estimate(camera1, points1, camera2, points2,
135 remaining_matches, options);
// Stop once no non-degenerate geometry can be found in the leftovers.
136 if (two_view_geometry.
config == ConfigurationType::DEGENERATE) {
// Depending on options (presumably multiple_ignore_watermark — confirm),
// watermark configurations are either skipped or kept.
141 if (two_view_geometry.
config != ConfigurationType::WATERMARK) {
142 two_view_geometries.push_back(two_view_geometry);
145 two_view_geometries.push_back(two_view_geometry);
// Remove this geometry's inliers before searching for the next one.
148 remaining_matches = ExtractOutlierMatches(remaining_matches,
// Aggregate: none found -> degenerate; exactly one -> adopt it as-is;
// several -> mark MULTIPLE and concatenate all inlier matches.
152 if (two_view_geometries.empty()) {
153 config = ConfigurationType::DEGENERATE;
154 }
else if (two_view_geometries.size() == 1) {
155 *
this = two_view_geometries[0];
157 config = ConfigurationType::MULTIPLE;
159 for (
const auto& two_view_geometry : two_view_geometries) {
// Elided: presumably inlier_matches.insert(inlier_matches.end(), ...) —
// the two iterator arguments below are what remains visible.
161 two_view_geometry.inlier_matches.begin(),
162 two_view_geometry.inlier_matches.end());
// NOTE(review): fragmentary extract of EstimateRelativePose — recovers
// relative pose (R, tvec) and triangulated points from the stored inlier
// matches, using the essential matrix or the homography depending on the
// configuration (the branching lines are elided). Confirm against the full
// source; comments below describe only what is visible.
168 const Camera& camera1,
const std::vector<Eigen::Vector2d>& points1,
169 const Camera& camera2,
const std::vector<Eigen::Vector2d>& points2) {
// Matched keypoints converted to normalized camera coordinates via the
// camera models' ImageToWorld().
177 std::vector<Eigen::Vector2d> inlier_points1_normalized;
179 std::vector<Eigen::Vector2d> inlier_points2_normalized;
182 const point2D_t idx1 = match.point2D_idx1;
183 const point2D_t idx2 = match.point2D_idx2;
184 inlier_points1_normalized.push_back(camera1.
ImageToWorld(points1[idx1]));
185 inlier_points2_normalized.push_back(camera2.
ImageToWorld(points2[idx2]));
189 std::vector<Eigen::Vector3d> points3D;
// Pose from essential matrix (call header elided; presumably
// PoseFromEssentialMatrix per the listing's prototype index).
197 inlier_points2_normalized, &R, &
tvec, &points3D);
// Pose from homography (call header elided; presumably
// PoseFromHomographyMatrix, which additionally outputs the plane normal n).
203 inlier_points1_normalized, inlier_points2_normalized, &R, &
tvec, &n,
// No triangulated points means the pose cannot be evaluated further.
211 if (points3D.empty()) {
// Visible fragment: second projection center computed as -R^T * tvec with
// the first at the origin (presumably fed to CalculateTriangulationAngles).
215 Eigen::Vector3d::Zero(), -R.transpose() *
tvec, points3D));
// Zero baseline (pure rotation / panoramic) is handled specially.
219 if (
tvec.norm() == 0) {
// NOTE(review): fragmentary extract of EstimateCalibrated — runs three
// RANSAC estimations (essential matrix E on normalized points, fundamental
// matrix F and homography H on image points), then classifies the pair via
// inlier-ratio heuristics. Many lines (guards, thresholds, else branches)
// are elided; comments describe only the visible structure.
231 const Camera& camera1,
const std::vector<Eigen::Vector2d>& points1,
232 const Camera& camera2,
const std::vector<Eigen::Vector2d>& points2,
// Early-out (condition elided, presumably too few matches): degenerate.
237 config = ConfigurationType::DEGENERATE;
// Gather matched image points, plus normalized versions for E estimation.
242 std::vector<Eigen::Vector2d> matched_points1(matches.size());
243 std::vector<Eigen::Vector2d> matched_points2(matches.size());
244 std::vector<Eigen::Vector2d> matched_points1_normalized(matches.size());
245 std::vector<Eigen::Vector2d> matched_points2_normalized(matches.size());
246 for (
size_t i = 0; i < matches.size(); ++i) {
247 const point2D_t idx1 = matches[i].point2D_idx1;
248 const point2D_t idx2 = matches[i].point2D_idx2;
249 matched_points1[i] = points1[idx1];
250 matched_points2[i] = points2[idx2];
251 matched_points1_normalized[i] = camera1.
ImageToWorld(points1[idx1]);
252 matched_points2_normalized[i] = camera2.
ImageToWorld(points2[idx2]);
// E estimation uses normalized coordinates; ransac type decl elided.
264 E_ransac(E_ransac_options);
265 const auto E_report =
266 E_ransac.
Estimate(matched_points1_normalized, matched_points2_normalized);
// F and H estimation operate directly on image coordinates.
272 const auto F_report = F_ransac.Estimate(matched_points1, matched_points2);
279 const auto H_report = H_ransac.
Estimate(matched_points1, matched_points2);
// All three estimators failed (further conditions elided) -> degenerate.
282 if ((!E_report.success && !F_report.success && !H_report.success) ||
286 config = ConfigurationType::DEGENERATE;
// Classification ratios: E vs F, H vs F, and H vs E inlier counts.
292 const double E_F_inlier_ratio =
293 static_cast<double>(E_report.support.num_inliers) /
294 F_report.support.num_inliers;
295 const double H_F_inlier_ratio =
296 static_cast<double>(H_report.support.num_inliers) /
297 F_report.support.num_inliers;
298 const double H_E_inlier_ratio =
299 static_cast<double>(H_report.support.num_inliers) /
300 E_report.support.num_inliers;
// The winning model's inlier mask; stays null if nothing qualifies.
302 const std::vector<char>* best_inlier_mask =
nullptr;
303 size_t num_inliers = 0;
// Branch (outer condition elided, presumably E succeeded with a
// sufficiently high E/F ratio — confirm): pick the better of E and F ...
310 if (E_report.support.num_inliers >= F_report.support.num_inliers) {
311 num_inliers = E_report.support.num_inliers;
312 best_inlier_mask = &E_report.inlier_mask;
314 num_inliers = F_report.support.num_inliers;
315 best_inlier_mask = &F_report.inlier_mask;
// ... but prefer H when it explains even more matches.
320 if (H_report.support.num_inliers > num_inliers) {
321 num_inliers = H_report.support.num_inliers;
322 best_inlier_mask = &H_report.inlier_mask;
325 config = ConfigurationType::CALIBRATED;
327 }
else if (F_report.success &&
331 num_inliers = F_report.support.num_inliers;
332 best_inlier_mask = &F_report.inlier_mask;
335 config = ConfigurationType::PLANAR_OR_PANORAMIC;
336 if (H_report.support.num_inliers > num_inliers) {
337 num_inliers = H_report.support.num_inliers;
338 best_inlier_mask = &H_report.inlier_mask;
341 config = ConfigurationType::UNCALIBRATED;
343 }
else if (H_report.success &&
345 num_inliers = H_report.support.num_inliers;
346 best_inlier_mask = &H_report.inlier_mask;
347 config = ConfigurationType::PLANAR_OR_PANORAMIC;
349 config = ConfigurationType::DEGENERATE;
// With a winning model, extract its inliers and optionally reclassify the
// pair as a watermark (DetectWatermark call partially elided below).
353 if (best_inlier_mask !=
nullptr) {
355 ExtractInlierMatches(matches, num_inliers, *best_inlier_mask);
359 num_inliers, *best_inlier_mask, options)) {
360 config = ConfigurationType::WATERMARK;
// NOTE(review): fragmentary extract of EstimateUncalibrated — like
// EstimateCalibrated but without camera intrinsics, so only F and H are
// estimated. Guards, RANSAC declarations, and threshold conditions are
// elided; comments describe only what is visible.
366 const Camera& camera1,
const std::vector<Eigen::Vector2d>& points1,
367 const Camera& camera2,
const std::vector<Eigen::Vector2d>& points2,
// Early-out (condition elided, presumably too few matches): degenerate.
372 config = ConfigurationType::DEGENERATE;
// Gather matched image points (no normalization without intrinsics).
377 std::vector<Eigen::Vector2d> matched_points1(matches.size());
378 std::vector<Eigen::Vector2d> matched_points2(matches.size());
379 for (
size_t i = 0; i < matches.size(); ++i) {
380 matched_points1[i] = points1[matches[i].point2D_idx1];
381 matched_points2[i] = points2[matches[i].point2D_idx2];
// Estimate F and H on image coordinates.
389 const auto F_report = F_ransac.Estimate(matched_points1, matched_points2);
396 const auto H_report = H_ransac.
Estimate(matched_points1, matched_points2);
// Both estimators failed (further conditions elided) -> degenerate.
399 if ((!F_report.success && !H_report.success) ||
402 config = ConfigurationType::DEGENERATE;
// Classify via the H/F inlier ratio: mostly-homography scenes are planar
// or panoramic, otherwise a general uncalibrated epipolar geometry.
408 const double H_F_inlier_ratio =
409 static_cast<double>(H_report.support.num_inliers) /
410 F_report.support.num_inliers;
413 config = ConfigurationType::PLANAR_OR_PANORAMIC;
415 config = ConfigurationType::UNCALIBRATED;
// F's inliers become the geometry's inlier matches.
418 inlier_matches = ExtractInlierMatches(matches, F_report.support.num_inliers,
419 F_report.inlier_mask);
// Optional watermark reclassification (call partially elided).
423 F_report.support.num_inliers, F_report.inlier_mask,
425 config = ConfigurationType::WATERMARK;
// NOTE(review): fragmentary extract of DetectWatermark — decides whether
// the inlier matches stem from a watermark/overlay: matches concentrated in
// the image borders that fit a pure 2D translation. Several lines (the
// Height() terms of the diagonals, minx1/minx2 definitions, the counter
// `j`, the final ratio thresholds and return) are elided.
430 const Camera& camera1,
const std::vector<Eigen::Vector2d>& points1,
431 const Camera& camera2,
const std::vector<Eigen::Vector2d>& points2,
432 const size_t num_inliers,
const std::vector<char>& inlier_mask,
// Image diagonals; the border width is presumably derived from these and
// options.watermark_border_size (definition elided — confirm).
438 const double diagonal1 = std::sqrt(camera1.
Width() * camera1.
Width() +
440 const double diagonal2 = std::sqrt(camera2.
Width() * camera2.
Width() +
// Inner bounding boxes: points OUTSIDE these boxes count as border matches.
443 const double miny1 = minx1;
444 const double maxx1 = camera1.
Width() - minx1;
445 const double maxy1 = camera1.
Height() - miny1;
447 const double miny2 = minx2;
448 const double maxx2 = camera2.
Width() - minx2;
449 const double maxy2 = camera2.
Height() - miny2;
// Compacted inlier coordinates, later fed to the translation RANSAC.
451 std::vector<Eigen::Vector2d> inlier_points1(num_inliers);
452 std::vector<Eigen::Vector2d> inlier_points2(num_inliers);
454 size_t num_matches_in_border = 0;
457 for (
size_t i = 0; i < inlier_mask.size(); ++i) {
458 if (inlier_mask[i]) {
459 const auto& point1 = points1[i];
460 const auto& point2 = points2[i];
// Compacted copy (index `j` and its increment are on elided lines).
462 inlier_points1[j] = point1;
463 inlier_points2[j] = point2;
// A match counts as "in the border" only when BOTH endpoints fall outside
// their image's inner bounding box.
466 if (!IsImagePointInBoundingBox(point1, minx1, maxx1, miny1, maxy1) &&
467 !IsImagePointInBoundingBox(point2, minx2, maxx2, miny2, maxy2)) {
468 num_matches_in_border += 1;
473 const double matches_in_border_ratio =
474 static_cast<double>(num_matches_in_border) / num_inliers;
// RANSAC fit of a pure translation model over all inliers (type
// declaration elided); a high inlier ratio here indicates a watermark.
486 ransac(ransac_options);
487 const auto report = ransac.
Estimate(inlier_points1, inlier_points2);
489 const double inlier_ratio =
490 static_cast<double>(report.support.num_inliers) / num_inliers;
Eigen::Vector2d ImageToWorld(const Eigen::Vector2d &image_point) const
double ImageToWorldThreshold(const double threshold) const
bool HasPriorFocalLength() const
Eigen::Matrix3d CalibrationMatrix() const
Report Estimate(const std::vector< typename Estimator::X_t > &X, const std::vector< typename Estimator::Y_t > &Y)
CLOUDVIEWER_HOST_DEVICE Pair< First, Second > make_pair(const First &_first, const Second &_second)
void PoseFromHomographyMatrix(const Eigen::Matrix3d &H, const Eigen::Matrix3d &K1, const Eigen::Matrix3d &K2, const std::vector< Eigen::Vector2d > &points1, const std::vector< Eigen::Vector2d > &points2, Eigen::Matrix3d *R, Eigen::Vector3d *t, Eigen::Vector3d *n, std::vector< Eigen::Vector3d > *points3D)
void InvertPose(const Eigen::Vector4d &qvec, const Eigen::Vector3d &tvec, Eigen::Vector4d *inv_qvec, Eigen::Vector3d *inv_tvec)
std::vector< double > CalculateTriangulationAngles(const Eigen::Vector3d &proj_center1, const Eigen::Vector3d &proj_center2, const std::vector< Eigen::Vector3d > &points3D)
double Median(const std::vector< T > &elems)
Eigen::Vector4d RotationMatrixToQuaternion(const Eigen::Matrix3d &rot_mat)
void PoseFromEssentialMatrix(const Eigen::Matrix3d &E, const std::vector< Eigen::Vector2d > &points1, const std::vector< Eigen::Vector2d > &points2, Eigen::Matrix3d *R, Eigen::Vector3d *t, std::vector< Eigen::Vector3d > *points3D)
std::vector< FeatureMatch > FeatureMatches
void swap(cloudViewer::core::SmallVectorImpl< T > &LHS, cloudViewer::core::SmallVectorImpl< T > &RHS)
Implement std::swap in terms of SmallVector swap.
double watermark_min_inlier_ratio
double max_H_inlier_ratio
double min_E_F_inlier_ratio
double watermark_border_size
bool multiple_ignore_watermark
RANSACOptions ransac_options
void EstimateUncalibrated(const Camera &camera1, const std::vector< Eigen::Vector2d > &points1, const Camera &camera2, const std::vector< Eigen::Vector2d > &points2, const FeatureMatches &matches, const Options &options)
void EstimateCalibrated(const Camera &camera1, const std::vector< Eigen::Vector2d > &points1, const Camera &camera2, const std::vector< Eigen::Vector2d > &points2, const FeatureMatches &matches, const Options &options)
void EstimateMultiple(const Camera &camera1, const std::vector< Eigen::Vector2d > &points1, const Camera &camera2, const std::vector< Eigen::Vector2d > &points2, const FeatureMatches &matches, const Options &options)
static bool DetectWatermark(const Camera &camera1, const std::vector< Eigen::Vector2d > &points1, const Camera &camera2, const std::vector< Eigen::Vector2d > &points2, const size_t num_inliers, const std::vector< char > &inlier_mask, const Options &options)
void Estimate(const Camera &camera1, const std::vector< Eigen::Vector2d > &points1, const Camera &camera2, const std::vector< Eigen::Vector2d > &points2, const FeatureMatches &matches, const Options &options)
bool EstimateRelativePose(const Camera &camera1, const std::vector< Eigen::Vector2d > &points1, const Camera &camera2, const std::vector< Eigen::Vector2d > &points2)
FeatureMatches inlier_matches