12 #include "pipelines/registration/RobustKernel.h"
18 namespace registration {
20 template <
class RobustKernelBase = RobustKernel>
23 using RobustKernelBase::RobustKernelBase;
24 double Weight(
double residual)
const override {
25 PYBIND11_OVERLOAD_PURE(
double, RobustKernelBase, residual);
40 py::class_<RobustKernel, std::shared_ptr<RobustKernel>,
PyRobustKernel> rk(
43 Base class that models a robust kernel for outlier rejection. The virtual
44 function ``weight()`` must be implemented in derived classes.
46 The main idea of a robust loss is to downweight large residuals that are
47 assumed to be caused from outliers such that their influence on the solution
48 is reduced. This is achieved by optimizing:
51 \def\argmin{\mathop{\rm argmin}}
53 x^{*} = \argmin_{x} \sum_{i=1}^{N} \rho({r_i(x)})
57 where :math:`\rho(r)` is also called the robust loss or kernel and
58 :math:`r_i(x)` is the residual.
60 Several robust kernels have been proposed to deal with different kinds of
61 outliers such as Huber, Cauchy, and others.
63 The optimization problem in :eq:`robust_loss` can be solved using the
64 iteratively reweighted least squares (IRLS) approach, which solves a sequence
65 of weighted least squares problems. We can see the relation between the least
66 squares optimization in standard non-linear least squares and robust loss
67 optimization by comparing the respective gradients which go to zero at the
68 optimum (illustrated only for the :math:`i^\mathrm{th}` residual):
72 \frac{1}{2}\frac{\partial (w_i r^2_i(x))}{\partial{x}}
74 w_i r_i(x) \frac{\partial r_i(x)}{\partial{x}} \\
75 \label{eq:gradient_ls}
76 \frac{\partial(\rho(r_i(x)))}{\partial{x}}
78 \rho'(r_i(x)) \frac{\partial r_i(x)}{\partial{x}}.
81 By setting the weight :math:`w_i= \frac{1}{r_i(x)}\rho'(r_i(x))`, we
82 can solve the robust loss optimization problem by using the existing techniques
83 for weighted least-squares. This scheme allows standard solvers using
84 Gauss-Newton and Levenberg-Marquardt algorithms to optimize for robust losses
85 and is the one implemented in CloudViewer.
87 Then we minimize the objective function using Gauss-Newton and determine
88 increments by iteratively solving:
91 \newcommand{\mat}[1]{\mathbf{#1}}
92 \newcommand{\veca}[1]{\vec{#1}}
93 \renewcommand{\vec}[1]{\mathbf{#1}}
95 \left(\mat{J}^\top \mat{W} \mat{J}\right)^{-1}\mat{J}^\top\mat{W}\vec{r},
98 where :math:`\mat{W} \in \mathbb{R}^{n\times n}` is a diagonal matrix containing
99 weights :math:`w_i` for each residual :math:`r_i`.
101 The different loss functions will only impact in the weight for each residual
102 during the optimization step.
103 Therefore, the only impact of the choice of kernel is through its first
106 The kernels implemented so far, and the notation used, have been inspired by the
107 publication: **"Analysis of Robust Functions for Registration Algorithms"**, by
108 Philippe Babin et al.
110 For more information please also see: **"Adaptive Robust Kernels for
111 Non-Linear Least Squares Problems"**, by Nived Chebrolu et al.
114 "Obtain the weight for the given residual according to the "
115 "robust kernel model.");
117 m,
"RobustKernel",
"weight",
118 {{
"residual",
"value obtained during the optimization problem"}});
124 The loss :math:`\rho(r)` for a given residual ``r`` is given by:
126 .. math:: \rho(r) = \frac{r^2}{2}
128 The weight :math:`w(r)` for a given residual ``r`` is given by:
132 py::detail::bind_default_constructor<L2Loss>(l2_loss);
133 py::detail::bind_copy_functions<L2Loss>(l2_loss);
134 l2_loss.def("__repr__", [](
const L2Loss &rk) {
136 return "RobustKernel::L2Loss";
143 The loss :math:`\rho(r)` for a given residual ``r`` is given by:
145 .. math:: \rho(r) = |r|
147 The weight :math:`w(r)` for a given residual ``r`` is given by:
149 .. math:: w(r) = \frac{1}{|r|}
151 py::detail::bind_default_constructor<L1Loss>(l1_loss);
152 py::detail::bind_copy_functions<L1Loss>(l1_loss);
153 l1_loss.def("__repr__", [](
const L1Loss &rk) {
155 return "RobustKernel::L1Loss";
160 h_loss(m,
"HuberLoss",
162 The loss :math:`\rho(r)` for a given residual ``r`` is:
168 \frac{r^{2}}{2}, & |r| \leq k.\\
169 k(|r|-k / 2), & \text{otherwise}.
173 The weight :math:`w(r)` for a given residual ``r`` is given by:
180 \frac{k}{|r|} , & \text{otherwise}.
184 py::detail::bind_copy_functions<HuberLoss>(h_loss);
186 [](double k) {
return std::make_shared<HuberLoss>(k); }),
190 return std::string(
"RobustKernel::HuberLoss with k=") +
193 .def_readwrite(
"k", &
HuberLoss::k_,
"Parameter of the loss");
196 py::class_<CauchyLoss, std::shared_ptr<CauchyLoss>,
PyCauchyLoss,
198 c_loss(m,
"CauchyLoss",
200 The loss :math:`\rho(r)` for a given residual ``r`` is:
205 \frac{k^2}{2} \log\left(1 + \left(\frac{r}{k}\right)^2\right)
208 The weight :math:`w(r)` for a given residual ``r`` is given by:
213 \frac{1}{1 + \left(\frac{r}{k}\right)^2}
216 py::detail::bind_copy_functions<CauchyLoss>(c_loss);
217 c_loss.def(py::init([](double k) {
218 return std::make_shared<CauchyLoss>(k);
223 return std::string(
"RobustKernel::CauchyLoss with k=") +
232 The loss :math:`\rho(r)` for a given residual ``r`` is:
237 \frac{r^2/ 2}{k + r^2}
240 The weight :math:`w(r)` for a given residual ``r`` is given by:
245 \frac{k}{\left(k + r^2\right)^2}
248 py::detail::bind_copy_functions<GMLoss>(gm_loss);
249 gm_loss.def(py::init([](double k) {
return std::make_shared<GMLoss>(k); }),
253 return std::string(
"RobustKernel::GMLoss with k=") +
256 .def_readwrite(
"k", &
GMLoss::k_,
"Parameter of the loss.");
260 t_loss(m,
"TukeyLoss",
262 The loss :math:`\rho(r)` for a given residual ``r`` is:
268 \frac{k^2\left[1-\left(1-\left(\frac{r}{k}\right)^2\right)^3\right]}{2}, & |r| \leq k. \\
269 \frac{k^2}{2}, & \text{otherwise}.
273 The weight :math:`w(r)` for a given residual ``r`` is given by:
279 \left(1 - \left(\frac{r}{k}\right)^2\right)^2, & |r| \leq k. \\
280 0 , & \text{otherwise}.
284 py::detail::bind_copy_functions<TukeyLoss>(t_loss);
286 [](double k) {
return std::make_shared<TukeyLoss>(k); }),
290 return std::string(
"RobustKernel::TukeyLoss with k=") +
294 ``k`` is a tuning constant for the loss.
double k_
Scaling parameter.
double k_
Scaling parameter.
double k_
Scaling parameter.
double Weight(double residual) const override
virtual double Weight(double residual) const =0
void ClassMethodDocInject(py::module &pybind_module, const std::string &class_name, const std::string &function_name, const std::unordered_map< std::string, std::string > &map_parameter_body_docs)
void pybind_robust_kernels(py::module &m)
PyRobustKernelT< L2Loss > PyL2Loss
PyRobustKernelT< HuberLoss > PyHuberLoss
PyRobustKernelT< GMLoss > PyGMLoss
PyRobustKernelT< CauchyLoss > PyCauchyLoss
PyRobustKernelT< L1Loss > PyL1Loss
PyRobustKernelT< TukeyLoss > PyTukeyLoss
Generic file read and write utility for python interface.
std::string to_string(const T &n)