46 struct VanishingPointEstimator {
48 typedef LineSegment X_t;
50 typedef Eigen::Vector3d Y_t;
52 typedef Eigen::Vector3d M_t;
58 static std::vector<M_t> Estimate(
const std::vector<X_t>& line_segments,
59 const std::vector<Y_t>& lines) {
60 CHECK_EQ(line_segments.size(), 2);
61 CHECK_EQ(lines.size(), 2);
62 return {lines[0].cross(lines[1])};
67 static void Residuals(
const std::vector<X_t>& line_segments,
68 const std::vector<Y_t>& lines,
69 const M_t& vanishing_point,
70 std::vector<double>* residuals) {
71 residuals->resize(line_segments.size());
74 if (vanishing_point[2] == 0) {
75 std::fill(residuals->begin(), residuals->end(),
76 std::numeric_limits<double>::max());
80 for (
size_t i = 0; i < lines.size(); ++i) {
81 const Eigen::Vector3d midpoint =
82 (0.5 * (line_segments[i].start + line_segments[i].end)).homogeneous();
83 const Eigen::Vector3d connecting_line = midpoint.cross(vanishing_point);
84 const double signed_distance =
85 connecting_line.dot(line_segments[i].end.homogeneous()) /
86 connecting_line.head<2>().norm();
87 (*residuals)[i] = signed_distance * signed_distance;
92 Eigen::Vector3d FindBestConsensusAxis(
const std::vector<Eigen::Vector3d>& axes,
93 const double max_distance) {
95 return Eigen::Vector3d::Zero();
98 std::vector<int> inlier_idxs;
99 inlier_idxs.reserve(axes.size());
101 std::vector<int> best_inlier_idxs;
102 best_inlier_idxs.reserve(axes.size());
104 double best_inlier_distance_sum = std::numeric_limits<double>::max();
106 for (
size_t i = 0; i < axes.size(); ++i) {
107 const Eigen::Vector3d ref_axis = axes[i];
108 double inlier_distance_sum = 0;
110 for (
size_t j = 0; j < axes.size(); ++j) {
112 inlier_idxs.push_back(j);
114 const double distance = 1 - ref_axis.dot(axes[j]);
117 inlier_idxs.push_back(j);
122 if (inlier_idxs.size() > best_inlier_idxs.size() ||
123 (inlier_idxs.size() == best_inlier_idxs.size() &&
124 inlier_distance_sum < best_inlier_distance_sum)) {
125 best_inlier_distance_sum = inlier_distance_sum;
126 best_inlier_idxs = inlier_idxs;
130 if (best_inlier_idxs.empty()) {
131 return Eigen::Vector3d::Zero();
134 Eigen::Vector3d best_axis(0, 0, 0);
135 for (
const auto idx : best_inlier_idxs) {
136 best_axis += axes[idx];
138 best_axis /= best_inlier_idxs.size();
146 const Reconstruction& reconstruction,
const double max_axis_distance) {
147 std::vector<Eigen::Vector3d> downward_axes;
149 for (
const auto image_id : reconstruction.
RegImageIds()) {
150 const auto&
image = reconstruction.
Image(image_id);
151 downward_axes.push_back(
image.RotationMatrix().row(1));
153 return FindBestConsensusAxis(downward_axes, max_axis_distance);
// EstimateManhattanWorldFrame (body fragment): for each registered image,
// reads and undistorts the image, detects line segments, classifies them as
// horizontal/vertical, estimates the two vanishing points with RANSAC, and
// accumulates per-image rightward/downward world axes; finally composes an
// (approximately) orthonormal Manhattan frame from the consensus axes.
// NOTE(review): this extract is heavily garbled — the stray leading numbers
// are residual source line numbers, and many statements (option setup,
// bitmap read, detector calls, several right-hand sides) are missing.
// Do not compile as-is; recover the complete function from upstream.
158 const Reconstruction& reconstruction,
const std::string& image_path) {
// Per-image votes for the world rightward (x) and downward (y) axes.
159 std::vector<Eigen::Vector3d> rightward_axes;
160 std::vector<Eigen::Vector3d> downward_axes;
161 for (
size_t i = 0; i < reconstruction.
NumRegImages(); ++i) {
162 const auto image_id = reconstruction.
RegImageIds()[i];
163 const auto&
image = reconstruction.
Image(image_id);
164 const auto& camera = reconstruction.
Camera(
image.CameraId());
// Fragment of a progress-heading format call (enclosing call truncated).
167 image.Name().c_str(), i + 1,
170 std::cout <<
"Reading image..." <<
std::endl;
// NOTE(review): the bitmap read (Bitmap::Read of the joined image path)
// appears to be missing between these log statements.
175 std::cout <<
"Undistorting image..." <<
std::endl;
// Undistort so that straight world lines project to straight image lines.
180 Bitmap undistorted_bitmap;
181 Camera undistorted_camera;
182 UndistortImage(undistortion_options, bitmap, camera, &undistorted_bitmap,
183 &undistorted_camera);
185 std::cout <<
"Detecting lines...";
// NOTE(review): detector/classifier call right-hand sides are missing
// (presumably DetectLineSegments / ClassifyLineSegmentOrientations).
187 const std::vector<LineSegment> line_segments =
189 const std::vector<LineSegmentOrientation> line_orientations =
// Partition detected segments (and their homogeneous line equations)
// into horizontal and vertical sets for the two vanishing points.
195 std::vector<LineSegment> horizontal_line_segments;
196 std::vector<LineSegment> vertical_line_segments;
197 std::vector<Eigen::Vector3d> horizontal_lines;
198 std::vector<Eigen::Vector3d> vertical_lines;
199 for (
size_t i = 0; i < line_segments.size(); ++i) {
200 const auto line_segment = line_segments[i];
201 const Eigen::Vector3d line_segment_start =
202 line_segment.start.homogeneous();
203 const Eigen::Vector3d line_segment_end = line_segment.end.homogeneous();
// Homogeneous line through the two segment endpoints.
204 const Eigen::Vector3d line = line_segment_start.cross(line_segment_end);
// NOTE(review): the orientation switch/if guarding these branches is
// missing from the extract.
206 horizontal_line_segments.push_back(line_segment);
207 horizontal_lines.push_back(line);
209 vertical_line_segments.push_back(line_segment);
210 vertical_lines.push_back(line);
214 std::cout <<
StringPrintf(
" (%d horizontal, %d vertical)",
215 horizontal_lines.size(), vertical_lines.size())
218 std::cout <<
"Estimating vanishing points...";
// RANSAC over the two segment classes; the estimator/options setup for
// `ransac` is not visible in this extract.
223 const auto horizontal_report =
224 ransac.
Estimate(horizontal_line_segments, horizontal_lines);
225 const auto vertical_report =
226 ransac.
Estimate(vertical_line_segments, vertical_lines);
228 std::cout <<
StringPrintf(
" (%d horizontal inliers, %d vertical inliers)",
229 horizontal_report.support.num_inliers,
230 vertical_report.support.num_inliers)
233 std::cout <<
"Composing coordinate axes..." <<
std::endl;
// Back-project vanishing points to camera rays via the inverse calibration
// matrix (right-hand side missing; presumably from `camera`).
235 const Eigen::Matrix3d inv_calib_matrix =
239 if (horizontal_report.success) {
240 const Eigen::Vector3d horizontal_camera_axis =
241 (inv_calib_matrix * horizontal_report.model).normalized();
// Rotate the camera-frame axis into world coordinates (RHS missing).
242 Eigen::Vector3d horizontal_axis =
// Keep all horizontal votes consistently oriented with the first vote.
245 if (rightward_axes.size() > 0 &&
246 rightward_axes[0].dot(horizontal_axis) < 0) {
247 horizontal_axis = -horizontal_axis;
249 rightward_axes.push_back(horizontal_axis);
250 std::cout <<
" Horizontal: " << horizontal_axis.transpose() <<
std::endl;
253 if (vertical_report.success) {
254 const Eigen::Vector3d vertical_camera_axis =
255 (inv_calib_matrix * vertical_report.model).normalized();
256 Eigen::Vector3d vertical_axis =
// Orient the vertical axis to point downward in the camera frame (+y).
260 if (vertical_camera_axis.dot(Eigen::Vector3d(0, 1, 0)) < 0) {
261 vertical_axis = -vertical_axis;
263 downward_axes.push_back(vertical_axis);
264 std::cout <<
" Vertical: " << vertical_axis.transpose() <<
std::endl;
// Compose the frame from the consensus axes; columns left zero when the
// corresponding axis could not be estimated.
270 Eigen::Matrix3d
frame = Eigen::Matrix3d::Zero();
272 if (rightward_axes.size() > 0) {
277 std::cout <<
"Found rightward axis: " <<
frame.col(0).transpose()
280 if (downward_axes.size() > 0) {
285 std::cout <<
"Found downward axis: " <<
frame.col(1).transpose() <<
std::endl;
287 if (rightward_axes.size() > 0 && downward_axes.size() > 0) {
// Project the (generally non-orthogonal) frame onto the nearest rotation
// via SVD: R = U * I * V^T.
289 Eigen::JacobiSVD<Eigen::Matrix3d> svd(
290 frame, Eigen::ComputeFullV | Eigen::ComputeFullU);
291 const Eigen::Matrix3d orthonormal_frame =
292 svd.matrixU() * Eigen::Matrix3d::Identity() * svd.matrixV().transpose();
293 frame = orthonormal_frame;
296 std::cout <<
"Found orthonormal frame: " <<
std::endl;
// AlignToPrincipalPlane (body fragment): centers the 3D points on the
// centroid, extracts the two dominant singular vectors as the in-plane
// basis, and builds a rotation (plus translation -R*centroid) that maps the
// principal plane onto z = 0. The second `rot_mat` assembly flips the
// second basis vector, presumably to enforce a right-handed/upward frame —
// the guarding condition is not visible here; confirm against upstream.
// NOTE(review): fragment — the function header and the declarations of
// `points`, `pidx`, and `centroid` are missing from this extract.
307 for (
const auto& point : recon->
Points3D()) {
308 points.col(pidx++) = point.second.XYZ() - centroid;
// Left singular vectors of the centered point matrix = principal axes.
310 const Eigen::Matrix3d basis =
311 points.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).matrixU();
312 Eigen::Matrix3d rot_mat;
// Third row completes a right-handed frame via the cross product.
313 rot_mat << basis.col(0), basis.col(1), basis.col(0).cross(basis.col(1));
314 rot_mat.transposeInPlace();
// Fragment of a transform construction: translate centroid to the origin.
317 -rot_mat * centroid);
// Alternative assembly with the second axis negated (guard not visible).
324 rot_mat << basis.col(0), -basis.col(1), basis.col(0).cross(-basis.col(1));
325 rot_mat.transposeInPlace();
327 -rot_mat * centroid);
// AlignToENUPlane (body fragment): converts the reconstruction centroid
// from ECEF-style XYZ to ellipsoidal (latitude, longitude) coordinates and
// builds the standard ECEF-to-ENU rotation whose rows are the East, North,
// and Up unit vectors at the centroid.
// NOTE(review): fragment — the function header, `gps_tform`, `centroid`,
// and the final transform construction are not fully visible here.
337 const Eigen::Vector3d ell_centroid = gps_tform.
XYZToEll({centroid}).
at(0);
// ell_centroid(0) = latitude, ell_centroid(1) = longitude (degrees).
340 const double sin_lat = sin(
DegToRad(ell_centroid(0)));
341 const double sin_lon = sin(
DegToRad(ell_centroid(1)));
342 const double cos_lat = cos(
DegToRad(ell_centroid(0)));
343 const double cos_lon = cos(
DegToRad(ell_centroid(1)));
// Rows: East = (-sin lon, cos lon, 0),
//       North = (-cos lon sin lat, -sin lon sin lat, cos lat),
//       Up = (cos lon cos lat, sin lon cos lat, sin lat).
346 Eigen::Matrix3d rot_mat;
347 rot_mat << -sin_lon, cos_lon, 0, -cos_lon * sin_lat, -sin_lon * sin_lat,
348 cos_lat, cos_lon * cos_lat, sin_lon * cos_lat, sin_lat;
// Optionally undo the similarity-transform scale so the result keeps the
// original metric scale.
350 const double scale = unscaled ? 1.0 / tform->
Scale() : 1.0;
// Fragment of a transform construction: translate centroid to the origin.
352 -(scale * rot_mat) * centroid);
std::shared_ptr< core::Tensor > image
bool Read(const std::string &path, const bool as_rgb=true)
Eigen::Matrix3d CalibrationMatrix() const
const Eigen::Vector3d & Tvec() const
const Eigen::Vector4d & Qvec() const
Eigen::Vector3d ProjectionCenter() const
Report Estimate(const std::vector< typename Estimator::X_t > &X, const std::vector< typename Estimator::Y_t > &Y)
size_t NumRegImages() const
void Transform(const SimilarityTransform3 &tform)
const std::unordered_map< image_t, class Image > & Images() const
Eigen::Vector3d ComputeCentroid(const double p0=0.1, const double p1=0.9) const
const class Image & Image(const image_t image_id) const
size_t NumPoints3D() const
const class Camera & Camera(const camera_t camera_id) const
const std::unordered_map< point3D_t, class Point3D > & Points3D() const
const std::vector< image_t > & RegImageIds() const
static const int kMinNumSamples
QTextStream & endl(QTextStream &stream)
static double distance(T *pot1, T *pot2)
void UndistortImage(const UndistortCameraOptions &options, const Bitmap &distorted_bitmap, const Camera &distorted_camera, Bitmap *undistorted_bitmap, Camera *undistorted_camera)
std::vector< LineSegment > DetectLineSegments(const Bitmap &bitmap, const double min_length)
Eigen::Vector4d RotationMatrixToQuaternion(const Eigen::Matrix3d &rot_mat)
Eigen::Matrix3d EstimateManhattanWorldFrame(const ManhattanWorldFrameEstimationOptions &options, const Reconstruction &reconstruction, const std::string &image_path)
Eigen::Vector4d InvertQuaternion(const Eigen::Vector4d &qvec)
std::string JoinPaths(T const &... paths)
std::vector< LineSegmentOrientation > ClassifyLineSegmentOrientations(const std::vector< LineSegment > &segments, const double tolerance)
Eigen::Vector3d EstimateGravityVectorFromImageOrientation(const Reconstruction &reconstruction, const double max_axis_distance)
Eigen::Vector3d QuaternionRotatePoint(const Eigen::Vector4d &qvec, const Eigen::Vector3d &point)
void PrintHeading1(const std::string &heading)
std::string StringPrintf(const char *format,...)
float DegToRad(const float deg)
void AlignToENUPlane(Reconstruction *recon, SimilarityTransform3 *tform, bool unscaled)
void AlignToPrincipalPlane(Reconstruction *recon, SimilarityTransform3 *tform)
double max_line_vp_distance
double line_orientation_tolerance