#include <shogun/lib/external/brent.h>
using namespace shogun;
using namespace Eigen;
#ifndef DOXYGEN_SHOULD_SKIP_THIS
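// Wrapper passed to the Brent line-search minimizer: evaluates the Laplace
// objective psi(alpha + x*dalpha) for a given step size x (see local_min below)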
class CPsiLine : public func_base
virtual double operator() (double x)
Map<VectorXd> eigen_f(f->vector, f->vlen);
Map<VectorXd> eigen_m(m->vector, m->vlen);
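// update alpha along the search direction and recompute f=K*alpha*scale^2+m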
(*alpha)=start_alpha+x*dalpha;
eigen_f=K*(*alpha)*CMath::sq(scale)+eigen_m;
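// first and second derivatives of the log likelihood wrt f at the new point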
(*dlp)=lik->get_log_probability_derivative_f(lab, (*f), 1);
(*W)=lik->get_log_probability_derivative_f(lab, (*f), 2);
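// line-search objective: psi = alpha'*(f-m)/2 - lp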
float64_t result = (*alpha).dot(eigen_f-eigen_m)/2.0-
void CLaplacianInferenceMethod::init()
m_opt_tolerance=1e-10;
Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
Map<VectorXd> eigen_mean(mean.vector, mean.vlen);
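// negative log marginal likelihood: nlZ = alpha'*(f-m)/2 - lp + log(det(B))/2;
// the determinant comes from an LU factorization when W has negative entries,
// and from the diagonal of the Cholesky factor L otherwise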
if (eigen_W.minCoeff()<0)
result=(eigen_alpha.dot(eigen_mu-eigen_mean))/2.0-
    lp+log(lu.determinant())/2.0;
result=eigen_alpha.dot(eigen_mu-eigen_mean)/2.0-lp+
    eigen_L.diagonal().array().log().sum();
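// solve V = L' \ (sW .* K); the posterior covariance is Sigma = K - V' * V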
MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
if (eigen_W.minCoeff() < 0)
VectorXd eigen_iW = (VectorXd::Ones(W.vlen)).cwiseQuotient(eigen_W);
FullPivLU<MatrixXd> lu(
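// Cholesky factorization of B = sW*sW' .* K + I; the upper factor is kept in m_L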
(eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_ktrtr*CMath::sq(m_scale))+
eigen_L = L.matrixU();
Map<VectorXd> eigen_mean(mean.vector, mean.vlen);
Map<VectorXd> eigen_mu(m_mu, m_mu.vlen);
Psi_New=eigen_alpha.dot(eigen_mu-eigen_mean)/2.0-
if (Psi_Def < Psi_New)
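// Newton iterations: repeat until the objective Psi stops decreasing
// or the maximum number of iterations is reached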
while (Psi_Old-Psi_New>m_tolerance && iter<m_iter)
Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);
if (eigen_W.minCoeff() < 0)
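// W may have negative entries for non-log-concave likelihoods;
// stabilize the Newton step by adding (2/df)*dlp.^2 to W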
eigen_W+=(2.0/df)*eigen_dlp.cwiseProduct(eigen_dlp);
eigen_sW=eigen_W.cwiseSqrt();
LLT<MatrixXd> L((eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_ktrtr*CMath::sq(m_scale))+
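// Newton direction: b = W.*(f-m) + dlp, dalpha = b - sW.*(B \ (sW.*(K*b))) - alpha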
VectorXd b=eigen_W.cwiseProduct(eigen_mu - eigen_mean)+eigen_dlp;
VectorXd dalpha=b-eigen_sW.cwiseProduct(
    L.solve(eigen_sW.cwiseProduct(eigen_ktrtr*b*CMath::sq(m_scale))))-eigen_alpha;
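// line search along dalpha using Brent's method to choose the step size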
func.start_alpha=eigen_alpha;
func.alpha=&eigen_alpha;
Psi_New=local_min(0, m_opt_max, m_opt_tolerance, func, x);
if (eigen_W.minCoeff()>0)
    eigen_sW=eigen_W.cwiseSqrt();
Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);
Map<VectorXd> eigen_d3lp(d3lp.vector, d3lp.vlen);
if (eigen_W.minCoeff()<0)
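// Z = diag(sW) * B^(-1) * diag(sW), via two triangular solves with the Cholesky factor L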
eigen_Z=eigen_L.triangularView<Upper>().adjoint().solve(
    MatrixXd(eigen_sW.asDiagonal()));
eigen_Z=eigen_L.triangularView<Upper>().solve(eigen_Z);
eigen_Z=eigen_sW.asDiagonal()*eigen_Z;
MatrixXd C=eigen_L.triangularView<Upper>().adjoint().solve(
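// g = (diag(K) - sum(C.^2, 1)')/2, i.e. half the diagonal of the posterior covariance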
(C.cwiseProduct(C)).colwise().sum().adjoint())/2.0;
Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
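// dfhat = g .* d3lp: derivative of the approximate marginal likelihood wrt the posterior mode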
eigen_dfhat=eigen_g.cwiseProduct(eigen_d3lp);
REQUIRE(!strcmp(param->m_name, "scale"), "Can't compute derivative of "
    "the negative log marginal likelihood wrt %s.%s parameter\n",
Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);
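// derivative of the scaled kernel wrt the scale parameter: dK = 2 * scale * K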
MatrixXd dK=eigen_K*m_scale*2.0;
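// explicit part of the gradient: dnlZ = sum(Z .* dK)/2 - alpha' * dK * alpha / 2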
result[0]=(eigen_Z.cwiseProduct(dK)).sum()/2.0-
    (eigen_alpha.adjoint()*dK).dot(eigen_alpha)/2.0;
VectorXd b=dK*eigen_dlp;
Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
Map<VectorXd> eigen_lp_dhyp(lp_dhyp.vector, lp_dhyp.vlen);
Map<VectorXd> eigen_dlp_dhyp(dlp_dhyp.vector, dlp_dhyp.vlen);
Map<VectorXd> eigen_d2lp_dhyp(d2lp_dhyp.vector, d2lp_dhyp.vlen);
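// gradient wrt the likelihood hyperparameter:
// dnlZ = -g'*d2lp_dhyp - sum(lp_dhyp) - dfhat'*(b - K*(Z*b)), with b = K*dlp_dhyp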
VectorXd b=eigen_K*eigen_dlp_dhyp;
result[0]=-eigen_g.dot(eigen_d2lp_dhyp)-eigen_lp_dhyp.sum()-
Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);
535 "Length of the parameter %s should not be NULL\n", param->
m_name)
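// same form as the scale derivative, with dK the gradient of the kernel wrt this parameter:
// dnlZ[i] = sum(Z .* dK)/2 - alpha' * dK * alpha / 2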
result[i]=(eigen_Z.cwiseProduct(eigen_dK)).sum()/2.0-
    (eigen_alpha.adjoint()*eigen_dK).dot(eigen_alpha)/2.0;
VectorXd b=eigen_dK*eigen_dlp;
Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
584 "Length of the parameter %s should not be NULL\n", param->
m_name)
Map<VectorXd> eigen_dmu(dmu.vector, dmu.vlen);
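// gradient wrt the mean parameter: dnlZ[i] = -alpha' * dmu - dfhat' * (dmu - K*(Z*dmu))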
result[i]=-eigen_alpha.dot(eigen_dmu)-eigen_dfhat.dot(eigen_dmu-