//dlib
#include <dlib/matrix.h>
#include <dlib/svm.h>

//system
#include <vector>
#include <cassert>
#include <cmath>
//! Trains a linear discriminant (LDA) on the two classes described by 'labels'
trained_function_type train(const std::vector<sample_type>& samplesvec, const std::vector<float>& labels) const
{
	size_t fdim = samplesvec[0].size();
	size_t nsamples = samplesvec.size();
	//count the samples of each class
	long ndata_class1 = 0, ndata_class2 = 0;
	for (size_t i = 0; i < nsamples; ++i)
	{
		if (labels[i] < 0) ++ndata_class1; else ++ndata_class2;
	}
	dlib::matrix<sample_type, 0, 1> samples1, samples2;
	samples1.set_size(ndata_class1);
	samples2.set_size(ndata_class2);
	//per-class means (initialized to zero)
	sample_type mu1, mu2;
	mu1.set_size(fdim);
	mu2.set_size(fdim);
	for (size_t i = 0; i < fdim; ++i)
	{
		mu1(i) = mu2(i) = 0;
	}
	//split the samples per class and accumulate the means
	ndata_class1 = 0; ndata_class2 = 0;
	for (size_t i = 0; i < nsamples; ++i)
	{
		if (labels[i] < 0)
		{
			samples1(ndata_class1++) = samplesvec[i];
			mu1 += samplesvec[i];
		}
		else
		{
			samples2(ndata_class2++) = samplesvec[i];
			mu2 += samplesvec[i];
		}
	}
	if (ndata_class1) mu1 /= static_cast<float>(ndata_class1);
	if (ndata_class2) mu2 /= static_cast<float>(ndata_class2);
	//per-class covariance matrices
	dlib::matrix<float> sigma1 = covariance(samples1);
	dlib::matrix<float> sigma2 = covariance(samples2);
	//LDA direction
	sample_type w_vect = pinv(sigma1 + sigma2) * (mu2 - mu1);
	trained_function_type ret;
	//decision threshold: mid-point of the two class means, projected on w
	ret.b = dot(w_vect, (mu1 + mu2) * 0.5);
	//canonical basis, so that each weight applies to one input dimension
	ret.basis_vectors.set_size(fdim);
	for (size_t i = 0; i < fdim; ++i)
	{
		ret.basis_vectors(i).set_size(fdim);
		for (size_t j = 0; j < fdim; ++j)
			ret.basis_vectors(i)(j) = 0;
		ret.basis_vectors(i)(i) = 1;
	}
	ret.alpha = w_vect;
	return ret;
}
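//A minimal usage sketch of the above (illustrative only - 'ExampleTrainLDA' is not part of the plugin):
//the samples are dlib column vectors (LDATrainer::sample_type, i.e. dlib::matrix<float, 0, 1>)
//and the labels are -1 / +1 per sample.
static float ExampleTrainLDA(	const std::vector<LDATrainer::sample_type>& samples,
								const std::vector<float>& labels)
{
	LDATrainer lda;
	//'train' returns a dlib::decision_function with a linear kernel
	LDATrainer::trained_function_type df = lda.train(samples, labels);
	//evaluating it gives the signed distance to the separating hyperplane
	return df(samples.front()); //< 0: class 1 side, > 0: class 2 side
}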
//! Same LDA training, but taking dlib containers
/** This is the overload invoked internally by dlib's helpers
	(e.g. dlib::train_probabilistic_decision_function).
**/
trained_function_type train(const trained_function_type::sample_vector_type& samplesvec, const trained_function_type::scalar_vector_type& labels) const
{
	int fdim = samplesvec(0).size();
	int nsamples = samplesvec.size();
	//count the samples of each class
	int ndata_class1 = 0, ndata_class2 = 0;
	for (int i=0; i<nsamples; ++i)
	{
		if (labels(i) < 0) ++ndata_class1; else ++ndata_class2;
	}
	dlib::matrix<sample_type,0,1> samples1, samples2;
	samples1.set_size(ndata_class1);
	samples2.set_size(ndata_class2);
	//per-class means (initialized to zero)
	sample_type mu1, mu2;
	mu1.set_size(fdim);
	mu2.set_size(fdim);
	for (int i=0; i<fdim; ++i)
	{
		mu1(i) = mu2(i) = 0;
	}
	//split the samples per class and accumulate the means
	ndata_class1 = 0; ndata_class2 = 0;
	for (int i=0; i<nsamples; ++i)
	{
		if (labels(i) < 0)
		{
			samples1(ndata_class1++) = samplesvec(i);
			mu1 += samplesvec(i);
		}
		else
		{
			samples2(ndata_class2++) = samplesvec(i);
			mu2 += samplesvec(i);
		}
	}
	if (ndata_class1) mu1 /= static_cast<float>(ndata_class1);
	if (ndata_class2) mu2 /= static_cast<float>(ndata_class2);
	//per-class covariance matrices
	dlib::matrix<float> sigma1 = covariance(samples1);
	dlib::matrix<float> sigma2 = covariance(samples2);
	//LDA direction and threshold (same as the std::vector overload above)
	sample_type w_vect = pinv(sigma1+sigma2) * (mu2 - mu1);
	trained_function_type ret;
	ret.b = dot(w_vect,(mu1+mu2)*0.5);
	ret.basis_vectors.set_size(fdim);
	for (int i=0; i<fdim; ++i)
	{
		ret.basis_vectors(i).set_size(fdim);
		for (int j=0; j<fdim; ++j)
			ret.basis_vectors(i)(j)=0;
		ret.basis_vectors(i)(i) = 1;
	}
	ret.alpha = w_vect;
	return ret;
}
//! Trains the classifier (with Platt scaling) and stores the result as a plain weight vector
/** Runs dlib::train_probabilistic_decision_function on top of the LDA trainer,
	then flattens the resulting linear decision function into 'm_weights'
	(one weight per dimension + the bias as last element).
**/
void train(	int nfolds,
			const std::vector<sample_type>& samples,
			const std::vector<float>& labels)
{
	dlib::probabilistic_decision_function<kernel_type> pdecfun =
		dlib::train_probabilistic_decision_function(*this, samples, labels, nfolds);
	dlib::decision_function<kernel_type>& decfun = pdecfun.decision_funct;

	int dim = samples.back().size();
	m_weights.clear();
	m_weights.resize(dim + 1, 0);

	//flatten the decision function: w = sum_i alpha(i) * basis_vector(i)
	dlib::matrix<float> w(dim, 1);
	w = dlib::zeros_matrix<float>(dim, 1);
	for (int i = 0; i < decfun.alpha.nr(); ++i)
	{
		w += decfun.alpha(i) * decfun.basis_vectors(i);
	}
	for (int i = 0; i < dim; ++i)
		m_weights[i] = w(i);
	m_weights[dim] = -decfun.b; //bias, so that 'predict' reproduces the decision function

	//rescale by the Platt 'alpha' so that the dot product feeds the sigmoid directly
	for (int i = 0; i <= dim; ++i)
		m_weights[i] *= pdecfun.alpha;
}
//! Returns the signed projection of 'data' onto the classifier direction (using 'm_weights': one weight per dimension + the bias as last element)
double predict(const sample_type& data) const
{
	double ret = m_weights.back(); //bias term
	for (size_t d = 0; d < m_weights.size() - 1; ++d)
		ret += static_cast<double>(m_weights[d]) * data(d);
	return ret;
}
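//Illustrative sketch of the cross-validated variant (made-up helper, not the plugin's own calling code):
//'train(nfolds, ...)' fills 'm_weights', after which 'predict' can be called directly on any sample.
static double ExamplePlattTrain(const std::vector<LDATrainer::sample_type>& samples,
								const std::vector<float>& labels)
{
	LDATrainer lda;
	lda.train(10, samples, labels); //10-fold Platt scaling on top of the LDA trainer
	return lda.predict(samples.front()); //signed projection, rescaled by the Platt 'alpha'
}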
//! Gram-Schmidt process to re-orthonormalise the basis around a new first vector
static void GramSchmidt(dlib::matrix<LDATrainer::sample_type, 0, 1>& basis, LDATrainer::sample_type& newX)
{
	long dim = basis.size();

	//find the basis vector that is most aligned with newX
	double maxabsdp = -1.0;
	long selectedCoord = 0;
	for (long i = 0; i < dim; ++i)
	{
		double absdp = fabs(dot(basis(i), newX));
		if (absdp > maxabsdp)
		{
			maxabsdp = absdp;
			selectedCoord = i;
		}
	}

	//discard it and make newX the first basis vector instead
	basis(selectedCoord) = basis(0);
	basis(0) = newX;

	//orthonormalise each vector against the preceding ones
	for (long j = 0; j < dim; ++j)
	{
		for (long i = 0; i < j; ++i)
			basis(j) -= (dot(basis(j), basis(i)) / dot(basis(i), basis(i))) * basis(i);
		basis(j) /= sqrt(dot(basis(j), basis(j)));
	}
}
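//Illustrative sketch (made-up helper): make a given direction 'w' the first axis of an
//otherwise canonical basis ('dim' is the feature space dimension).
static dlib::matrix<LDATrainer::sample_type, 0, 1> ExampleBasisFromDirection(LDATrainer::sample_type w, long dim)
{
	dlib::matrix<LDATrainer::sample_type, 0, 1> basis;
	basis.set_size(dim);
	for (long i = 0; i < dim; ++i)
	{
		basis(i).set_size(dim);
		for (long j = 0; j < dim; ++j)
			basis(i)(j) = (i == j ? 1.0f : 0.0f);
	}
	GramSchmidt(basis, w); //basis(0) is now w (normalised), the others are orthonormal to it
	return basis;
}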
//! Computes the positive and negative reference points (the centroid of each class in the 2D projection space)
static void ComputeReferencePoints(Classifier::Point2D& refpt_pos, Classifier::Point2D& refpt_neg,
									const std::vector<float>& proj1, const std::vector<float>& proj2,
									const std::vector<float>& labels, unsigned* _npos = 0, unsigned* _nneg = 0)
{
	assert(proj1.size() == proj2.size() && proj1.size() == labels.size());
	refpt_pos = Classifier::Point2D(0, 0);
	refpt_neg = Classifier::Point2D(0, 0);
	unsigned npos = 0, nneg = 0;
	for (size_t i = 0; i < labels.size(); ++i)
	{
		Classifier::Point2D p(proj1[i], proj2[i]);
		if (labels[i] > 0)
		{
			refpt_pos += p; ++npos;
		}
		else
		{
			refpt_neg += p; ++nneg;
		}
	}
	if (npos) refpt_pos /= static_cast<float>(npos);
	if (nneg) refpt_neg /= static_cast<float>(nneg);
	if (_npos) *_npos = npos;
	if (_nneg) *_nneg = nneg;
}
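//Illustrative call (made-up helper): get the centroid of the positive and negative samples
//in the (proj1, proj2) plane, together with the per-class counts.
static Classifier::Point2D ExampleReferencePoints(	const std::vector<float>& proj1,
													const std::vector<float>& proj2,
													const std::vector<float>& labels)
{
	Classifier::Point2D refPos, refNeg;
	unsigned nPos = 0, nNeg = 0;
	ComputeReferencePoints(refPos, refNeg, proj1, proj2, labels, &nPos, &nNeg);
	return refPos; //refNeg / nPos / nNeg are available the same way
}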
//! 'Dilates' the classifier so as to give the two classes comparable extents in the 2D projection space
static bool DilateClassifier(	Classifier& classifier,
								std::vector<float>& proj1,
								std::vector<float>& proj2,
								const std::vector<float>& labels,
								const std::vector<LDATrainer::sample_type>& samples,
								LDATrainer& trainer,
								LDATrainer& orthoTrainer)
{
	//... (the two projection directions 'e1' and 'e2' are set up here)

	//per-class means (m..) and variances (v..) of the projections along e1 and e2
	float m11 = 0, m21 = 0, m12 = 0, m22 = 0;
	float v11 = 0, v12 = 0, v21 = 0, v22 = 0;
	size_t nsamples1 = 0;
	size_t nsamples2 = 0;
	size_t nsamples = proj1.size();
	assert(proj1.size() == proj2.size());
	for (size_t i = 0; i < nsamples; ++i)
	{
		Classifier::Point2D p(proj1[i], proj2[i]);
		float p1 = p.dot(e1);
		float p2 = p.dot(e2);
		//... (accumulate the per-class sums and sums of squares of p1 and p2)
	}
	//... (divide the sums by the class counts to get the means)

	//unbiased per-class variances along each axis (from the sums of squares)
	v11 = (v11 - m11 * m11 * nsamples1) / (nsamples1 - 1);
	v21 = (v21 - m21 * m21 * nsamples2) / (nsamples2 - 1);
	v12 = (v12 - m12 * m12 * nsamples1) / (nsamples1 - 1);
	v22 = (v22 - m22 * m22 * nsamples2) / (nsamples2 - 1);

	//per-class anisotropy of the projections
	float d1 = sqrt(v11 / v12);
	float d2 = sqrt(v21 / v22);

	//... ('bdValues' - the dilated 2D basis - is built here)
	dlib::matrix<float, 2, 2> bd(bdValues);

	float biValues[4] = { e1.x, e2.x, e1.y, e2.y };
	dlib::matrix<float, 2, 2> bi(biValues);
	dlib::matrix<float, 2, 2> c = inv(trans(bd));

	//mix the two weight vectors with the 2x2 change of basis 'c'
	std::vector<float>& w1 = trainer.m_weights;
	std::vector<float>& w2 = orthoTrainer.m_weights;
	assert(w1.size() == w2.size());

	std::vector<float> wn1, wn2;
	try
	{
		wn1.resize(w1.size());
		wn2.resize(w2.size());
	}
	catch (const std::bad_alloc&)
	{
		//not enough memory
		return false;
	}

	for (size_t i = 0; i < w1.size(); ++i)
	{
		wn1[i] = c(0, 0) * w1[i] + c(0, 1) * w2[i];
		wn2[i] = c(1, 0) * w1[i] + c(1, 1) * w2[i];
	}
	//... (the mixed weights are copied back into 'trainer' and 'orthoTrainer')

	//update the per-sample projections with the new weights
	for (size_t i = 0; i < nsamples; ++i)
	{
		proj1[i] = trainer.predict(samples[i]);
		proj2[i] = orthoTrainer.predict(samples[i]);
	}

	//... (the reference points are finally recomputed:
	//	ComputeReferencePoints( ... , proj1, proj2, labels); )

	return true;
}
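//For reference, the variance computation in DilateClassifier relies on the usual
//'sum of squares' identity - a small stand-alone sketch (made-up helper, not plugin code):
//with s1 = sum(x) and s2 = sum(x^2) over n values, mean m = s1/n and variance v = (s2 - n*m*m)/(n - 1).
static float ExampleUnbiasedVariance(float sumX, float sumX2, size_t n)
{
	float m = sumX / n;
	return (sumX2 - n * m * m) / (n - 1);
}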