10 #include "pipelines/model.h"
15 namespace reconstruction {
// Shared docstrings for the keyword arguments of the model-pipeline bindings
// below, keyed by argument name. Consumed by FunctionDocInject to attach
// per-parameter documentation to each bound function.
// NOTE(review): key names were reconstructed from the `"name"_a` keyword
// arguments used in the m.def calls below — confirm against the original file.
static const std::unordered_map<std::string, std::string>
    map_shared_argument_docstrings = {
        {"input_path",
         "The input path containing cameras.bin/txt, images.bin/txt "
         "and points3D.bin/txt."},
        {"output_path",
         "The output path containing target cameras.bin/txt, "
         "images.bin/txt and points3D.bin/txt."},
        {"database_path",
         "Path to database in which to store the extracted data."},
        {"ref_images_path",
         "Path to text file containing reference images per line."},
        {"transform_path",
         "The alignment transformation matrix saving path."},
        {"alignment_type",
         "Alignment type: supported values are {plane, ecef, enu, "
         "enu-unscaled, custom}."},
        {"max_error",
         "Maximum error for a sample to be considered as an inlier. "
         "Note that the residual of an estimator corresponds to a "
         "squared error."},
        {"min_common_images", "Minimum common images."},
        {"robust_alignment", "Whether align robustly or not."},
        {"estimate_scale", "Whether estimate scale or not."},
        {"min_inlier_observations",
         "The threshold determines how many observations in a common "
         "image must reproject within the given threshold."},
        {"max_reproj_error", "The Maximum re-projection error."},
        {"output_type",
         "The supported output type values are {BIN, TXT, NVM, "
         "Bundler, VRML, PLY, R3D, CAM}."},
        {"skip_distortion",
         "Whether skip distortion or no. When skip_distortion == true "
         "it supports all camera models with the caveat that it's "
         "using the mean focal length which will be inaccurate for "
         "camera models with two focal lengths and distortion."},
        {"boundary", "The cropping boundary coordinates."},
        {"gps_transform_path",
         "The gps transformation parameters file path."},
        {"method",
         "The supported Model Orientation Alignment values are "
         "{MANHATTAN-WORLD, IMAGE-ORIENTATION}."},
        {"max_image_size", "The maximum image size for line detection."},
        {"split_type",
         "The supported split type values are {tiles, extent, parts}."},
        {"split_params", "The split parameters file path."},
        {"num_threads", "The number of cpu thread."},
        {"min_reg_images", "The minimum number of reg images."},
        {"min_num_points", "The minimum number of points."},
        {"overlap_ratio", "The overlapped ratio."},
        {"min_area_ratio", "The minimum area ratio."},
        {"is_inverse", "Whether inverse or not."}};
75 m.def(
"align_model", &AlignModel, py::call_guard<py::gil_scoped_release>(),
76 "Function for the alignment of model",
"input_path"_a,
77 "output_path"_a,
"database_path"_a =
"",
"ref_images_path"_a =
"",
78 "transform_path"_a =
"",
"alignment_type"_a =
"plane",
79 "max_error"_a = 0.0,
"min_common_images"_a = 3,
80 "robust_alignment"_a =
true,
"estimate_scale"_a =
true);
84 m.def(
"analyze_model", &AnalyzeModel,
85 py::call_guard<py::gil_scoped_release>(),
86 "Function for the analyse of model",
"input_path"_a);
90 m.def(
"compare_model", &CompareModel,
91 py::call_guard<py::gil_scoped_release>(),
92 "Function for the comparison of model",
"input_path1"_a,
93 "input_path2"_a,
"output_path"_a =
"",
94 "min_inlier_observations"_a = 0.3,
"max_reproj_error"_a = 8.0);
98 m.def(
"convert_model", &ConvertModel,
99 py::call_guard<py::gil_scoped_release>(),
100 "Function for the convertion of model",
"input_path"_a,
101 "output_path"_a,
"output_type"_a,
"skip_distortion"_a =
false);
105 m.def(
"crop_model", &CropModel, py::call_guard<py::gil_scoped_release>(),
106 "Function for the cropping of model",
"input_path"_a,
"output_path"_a,
107 "boundary"_a,
"gps_transform_path"_a =
"");
111 m.def(
"merge_model", &MergeModel, py::call_guard<py::gil_scoped_release>(),
112 "Function for the merging of model",
"input_path1"_a,
"input_path2"_a,
113 "output_path"_a,
"max_reproj_error"_a = 64.0);
117 m.def(
"align_model_orientation", &AlignModelOrientation,
118 py::call_guard<py::gil_scoped_release>(),
119 "Function for the orientation alignment of model",
"image_path"_a,
120 "input_path"_a,
"output_path"_a,
"method"_a =
"MANHATTAN-WORLD",
121 "max_image_size"_a = 1024);
125 m.def(
"split_model", &SplitModel, py::call_guard<py::gil_scoped_release>(),
126 "Function for the splitting of model",
"input_path"_a,
127 "output_path"_a,
"split_type"_a,
"split_params"_a,
128 "gps_transform_path"_a =
"",
"min_reg_images"_a = 10,
129 "min_num_points"_a = 100,
"overlap_ratio"_a = 0.0,
130 "min_area_ratio"_a = 0.0,
"num_threads"_a = -1);
134 m.def(
"transform_model", &TransformModel,
135 py::call_guard<py::gil_scoped_release>(),
136 "Function for the transformation of model",
"input_path"_a,
137 "output_path"_a,
"transform_path"_a,
"is_inverse"_a =
false);
143 py::module m_submodule = m.def_submodule(
"model",
"Reconstruction model.");
// Related declarations referenced by this translation unit (extraction residue
// from a documentation index, preserved here as a comment):
//   void FunctionDocInject(py::module& pybind_module,
//                          const std::string& function_name,
//                          const std::unordered_map<std::string, std::string>&
//                              map_parameter_body_docs);
//   static const std::unordered_map<std::string, std::string>
//       map_shared_argument_docstrings;
//   void pybind_model(py::module& m);
//   void pybind_model_methods(py::module& m);
// Generic file read and write utility for the Python interface.