本文整理汇总了C++中eigen::MatrixXd::diagonal方法的典型用法代码示例。如果您正苦于以下问题:C++ MatrixXd::diagonal方法的具体用法?C++ MatrixXd::diagonal怎么用?C++ MatrixXd::diagonal使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类eigen::MatrixXd
的用法示例。
在下文中一共展示了MatrixXd::diagonal方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: L
TEST_F(TriangularSolveTest, ForwardSubstBandedMatchEigenComputedResults)
{
    const int n = 6;
    const int m = 3;
    Eigen::MatrixXd L = Eigen::MatrixXd::Random(n, n);
    Eigen::VectorXd b = Eigen::VectorXd::Random(n);
    // Reduce L to a lower-triangular matrix with bandwidth m:
    // zero everything above the diagonal, then everything more than
    // m sub-diagonals below it.
    L.triangularView<Eigen::StrictlyUpper>().setZero();
    for (int col = 0; col < n; ++col) {
        for (int row = col + m + 1; row < n; ++row) {
            L(row, col) = 0;
        }
    }
    // Dense banded solve must match Eigen's triangular solve.
    Eigen::VectorXd x = forward_subst_banded(L, m, b);
    Eigen::VectorXd y = L.triangularView<Eigen::Lower>().solve(b);
    double tol = 1e-13;
    EXPECT_NEAR((x - y).norm(), 0, tol);
    EXPECT_NEAR((L * x - b).norm(), 0, tol);
    // Repack L into compact band storage (main diagonal in the last
    // column, sub-diagonal k shifted down by k rows) and solve again.
    Eigen::ArrayXXd band = Eigen::ArrayXXd::Zero(n, m + 1);
    band.col(m) = L.diagonal();
    for (int k = 1; k <= m; ++k) {
        band.col(m - k).block(k, 0, n - k, 1) = L.diagonal(-k);
    }
    Eigen::VectorXd z = forward_subst_banded(band, b);
    tol = 1e-16;
    EXPECT_NEAR((x - z).norm(), 0, tol);
}
示例2: U
TEST_F(TriangularSolveTest, BackwardSubstBandedMatchEigenComputedResults)
{
    const int n = 6;
    const int m = 3;
    Eigen::MatrixXd U = Eigen::MatrixXd::Random(n, n);
    Eigen::VectorXd b = Eigen::VectorXd::Random(n);
    // Reduce U to an upper-triangular matrix with bandwidth m:
    // zero everything below the diagonal, then everything more than
    // m super-diagonals above it.
    U.triangularView<Eigen::StrictlyLower>().setZero();
    for (int row = 0; row < n; ++row) {
        for (int col = row + m + 1; col < n; ++col) {
            U(row, col) = 0;
        }
    }
    // Dense banded solve must match Eigen's triangular solve.
    Eigen::VectorXd x = backward_subst_banded(U, m, b);
    Eigen::VectorXd y = U.triangularView<Eigen::Upper>().solve(b);
    double tol = 1e-13;
    EXPECT_NEAR((x - y).norm(), 0, tol);
    EXPECT_NEAR((U * x - b).norm(), 0, tol);
    // Repack U into compact band storage (main diagonal in column 0,
    // super-diagonal k stored in column k, top-aligned) and solve again.
    Eigen::ArrayXXd band = Eigen::ArrayXXd::Zero(n, m + 1);
    band.col(0) = U.diagonal();
    for (int k = 1; k <= m; ++k) {
        band.col(k).block(0, 0, n - k, 1) = U.diagonal(k);
    }
    Eigen::VectorXd z = backward_subst_banded(band, b);
    tol = 1e-16;
    EXPECT_NEAR((x - z).norm(), 0, tol);
}
示例3: kalmanFunc
// Simulate a 2-state linear system and run a discrete Kalman filter on it.
//   phi          - 2x2 state-transition matrix
//   upsilon      - process-noise input matrix
//   basis        - 1x2 measurement matrix (scalar measurements)
//   initial      - initial true state, as a 1x2 row vector
//   initial_cov  - initial state covariance (2x2); reused in place as the
//                  running filter covariance throughout the loop
//   measurements - number of time steps to simulate
//   noise        - row 0: measurement-noise variance, row 1: process-noise
//                  variance (square roots are taken when sampling)
// Returns a (measurements x 6) matrix laid out as
//   [ estimates xe (2) | truth x (2) | 3-sigma bounds (2) ].
// NOTE(review): noise samples come from MatrixXd::Random, which is uniform
// on [-1,1], not Gaussian -- confirm this is the intended noise model.
Eigen::MatrixXd kalmanFilter::kalmanFunc(Eigen::MatrixXd phi, Eigen::MatrixXd upsilon, Eigen::MatrixXd basis, Eigen::MatrixXd initial, Eigen::MatrixXd initial_cov, int measurements, Eigen::MatrixXd noise){
Eigen::MatrixXd x(measurements,2), xe(measurements,2), ym(measurements,1), covariance(measurements,2);
Eigen::MatrixXd gain;
x.setZero(measurements,2);
x.row(0) = initial;
xe.setZero(measurements,2);
ym.setZero(measurements,1);
// First noisy measurement of the initial state.
ym.row(0) = (basis*x.row(0).transpose()).transpose()+(noise.cwiseSqrt().row(0))*Eigen::MatrixXd::Random(1,1);
covariance.setZero(measurements,2);
// Only the covariance diagonal (per-state variances) is recorded per step.
covariance.row(0) = initial_cov.diagonal().transpose();
// Main loop
for(int i=0; i<(measurements-1); i++){
// Truth and Measurements
x.row(i+1) = (phi*x.row(i).transpose()+upsilon*(noise.cwiseSqrt().row(1))*Eigen::MatrixXd::Random(1,1)).transpose();
ym.row(i+1) = (basis*x.row(i+1).transpose()).transpose()+(noise.cwiseSqrt().row(0))*Eigen::MatrixXd::Random(1,1);
// Update Equations
// Scalar measurement, so the innovation covariance (a 1x1 matrix) is
// inverted element-wise via cwiseInverse.
gain = initial_cov*basis.transpose()*((basis*initial_cov*basis.transpose())+noise.row(0)).cwiseInverse();
initial_cov = (Eigen::MatrixXd::Identity(2,2)-gain*basis)*initial_cov;
xe.row(i) = xe.row(i)+(gain*(ym.row(i)-basis*xe.row(i).transpose())).transpose();
// Propagation Equations
xe.row(i+1) = (phi*xe.row(i).transpose()).transpose();
initial_cov = phi*initial_cov*phi.transpose()+upsilon*noise.row(1)*upsilon.transpose();
// NOTE(review): the loop never applies a measurement update to the last
// row, so xe.row(measurements-1) is propagation-only -- confirm intended.
covariance.row(i+1) = initial_cov.diagonal().transpose();
}
Eigen::MatrixXd result(measurements,6);
// Stack columns: estimates, truth, and 3*sqrt(variance) bounds.
result << xe, x, (covariance.cwiseSqrt())*3;
return result;
}
示例4: doubleLayer
Eigen::MatrixXd doubleLayer(const Vacuum<DerivativeTraits, PurisimaIntegrator> & gf, const std::vector<Element> & e) const {
    // Assemble the double-layer boundary operator: fill the off-diagonal
    // entries from the kernel, then overwrite the diagonal with the values
    // given by Purisima's formula (which uses the off-diagonal entries).
    Eigen::MatrixXd D = offDiagonalD(e, gf.exportKernelD());
    D.diagonal() = diagonalD(e, D);
    return D;
}
示例5: band_from_dense
// Pack the banded part of a dense square matrix A into compact band storage.
//   A  - n x n dense matrix (entries outside the band are ignored)
//   m1 - number of sub-diagonals to keep
//   m2 - number of super-diagonals to keep
// Returns an n x (m1+m2+1) array: the main diagonal in column m1,
// sub-diagonal i in column m1-i (shifted down i rows), super-diagonal i
// in column m1+i (top-aligned).
Eigen::ArrayXXd band_from_dense(const Eigen::MatrixXd & A, int m1, int m2)
{
    const int n = static_cast<int>(A.rows());
    assert(n == A.cols());
    // Out-of-range band widths would request nonexistent diagonals from
    // A.diagonal(+-i), which Eigen does not range-check in release builds.
    assert(m1 >= 0 && m1 < n);
    assert(m2 >= 0 && m2 < n);
    Eigen::ArrayXXd a = Eigen::ArrayXXd::Zero(n, m1 + m2 + 1);
    a.col(m1) = A.diagonal();
    for (int i = 1; i <= m1; i++) {
        // Sub-diagonal i has n-i entries; store it shifted down by i rows.
        a.col(m1 - i).block(i, 0, n - i, 1) = A.diagonal(-i);
    }
    for (int i = 1; i <= m2; i++) {
        // Super-diagonal i has n-i entries; store it top-aligned.
        a.col(m1 + i).block(0, 0, n - i, 1) = A.diagonal(i);
    }
    return a;
}
示例6: calc_grad
// Estimate the gradient of the ELBO with respect to the full-rank Gaussian
// variational parameters (mean and Cholesky factor) by naive Monte Carlo,
// and write the result into elbo_grad.
//   elbo_grad          - output: receives mu and L_chol gradients
//   m                  - model supplying log-density gradients
//   cont_params        - continuous model parameters (dimension check only)
//   n_monte_carlo_grad - number of Monte Carlo draws
//   rng                - RNG used for the standard-normal draws
//   print_stream       - stream passed through to the gradient evaluation
void calc_grad(normal_fullrank& elbo_grad,
M& m,
Eigen::VectorXd& cont_params,
int n_monte_carlo_grad,
BaseRNG& rng,
std::ostream* print_stream) const {
static const char* function =
"stan::variational::normal_fullrank::calc_grad";
// Dimension checks: the gradient object and the model parameters must both
// match this variational family's dimension.
stan::math::check_size_match(function,
"Dimension of elbo_grad", elbo_grad.dimension(),
"Dimension of variational q", dimension_);
stan::math::check_size_match(function,
"Dimension of variational q", dimension_,
"Dimension of variables in model", cont_params.size());
// Initialize everything to zero
Eigen::VectorXd mu_grad = Eigen::VectorXd::Zero(dimension_);
Eigen::MatrixXd L_grad = Eigen::MatrixXd::Zero(dimension_, dimension_);
double tmp_lp = 0.0;
Eigen::VectorXd tmp_mu_grad = Eigen::VectorXd::Zero(dimension_);
Eigen::VectorXd eta = Eigen::VectorXd::Zero(dimension_);
Eigen::VectorXd zeta = Eigen::VectorXd::Zero(dimension_);
// Naive Monte Carlo integration
for (int i = 0; i < n_monte_carlo_grad; ++i) {
// Draw from standard normal and transform to real-coordinate space
for (int d = 0; d < dimension_; ++d) {
eta(d) = stan::math::normal_rng(0, 1, rng);
}
zeta = transform(eta);
// Compute gradient step in real-coordinate space
stan::model::gradient(m, zeta, tmp_lp, tmp_mu_grad, print_stream);
// Update gradient parameters
mu_grad += tmp_mu_grad;
// Only the lower triangle is accumulated: L is a Cholesky factor, so the
// gradient with respect to its strict upper triangle is identically zero.
for (int ii = 0; ii < dimension_; ++ii) {
for (int jj = 0; jj <= ii; ++jj) {
L_grad(ii, jj) += tmp_mu_grad(ii) * eta(jj);
}
}
}
// Average the Monte Carlo draws.
mu_grad /= static_cast<double>(n_monte_carlo_grad);
L_grad /= static_cast<double>(n_monte_carlo_grad);
// Add gradient of entropy term
L_grad.diagonal().array() += L_chol_.diagonal().array().inverse();
// Set parameters to argument
elbo_grad.set_mu(mu_grad);
elbo_grad.set_L_chol(L_grad);
}
示例7: log_likelihood
// see Rasmussen and Williams, 2006 (p. 113)
// Evaluate the GP marginal log-likelihood at the given kernel
// hyper-parameters:  -1/2 y^T K^{-1} y - 1/2 log|K| - n/2 log(2 pi).
double log_likelihood(const Eigen::VectorXd& h_params,
    bool update_kernel = true) {
  this->_kernel_function.set_h_params(h_params);
  if (update_kernel)
    this->_compute_kernel();
  size_t n = this->_obs_mean.size();
  // log|K| via the Cholesky factor: K = L L^T, so
  // log|K| = 2 * sum(log(diag(L))).
  // see: http://xcorr.net/2008/06/11/log-determinant-of-positive-definite-matrices-in-matlab/
  Eigen::MatrixXd chol_factor = this->_llt.matrixL();
  long double log_det = 2 * chol_factor.diagonal().array().log().sum();
  // Data-fit term y^T K^{-1} y, with _alpha = K^{-1} * _obs_mean
  // precomputed elsewhere.
  double data_fit = this->_obs_mean.dot(this->_alpha);
  return -0.5 * data_fit - 0.5 * log_det - 0.5 * n * log(2 * M_PI);
}
示例8: analyze
// For each protein segment, slice its 3N x 3N block out of the NMA covariance
// matrix and derive: per-coordinate displacement amplitudes (sqrt of the
// covariance diagonal), and a residue-pair matrix whose lower triangle holds
// force constants and whose upper triangle holds displacement-difference
// norms. Results are attached to the segment objects.
//   nma_covariance   - full normal-mode-analysis covariance (3 coords/residue)
//   protein_segments - segments to analyze; receive the computed quantities
//   temperature      - temperature used to scale the force constants
void TrajectoryAnalyzer::analyze(const Eigen::MatrixXd & nma_covariance,
std::vector<std::shared_ptr<ProteinSegment>> & protein_segments,
const double & temperature) {
LOGD << "Fitting protein segments with NMA covariance matrix and computing force constants.";
for (std::shared_ptr<ProteinSegment> const & protein_segment : protein_segments) {
// Slice the segment's 3N x 3N covariance block (residue numbers are
// 1-based, three coordinates per residue).
Eigen::MatrixXd covariance = nma_covariance.block((protein_segment->get_start_residuum_nr() - 1) * 3,
(protein_segment->get_start_residuum_nr() - 1) * 3,
protein_segment->get_size() * 3,
protein_segment->get_size() * 3);
// Per-coordinate RMS displacement: sqrt of the covariance diagonal.
Eigen::VectorXd displacement_vector = covariance.diagonal().array().sqrt().matrix();
Eigen::MatrixXd kk_matrix = Eigen::MatrixXd::Zero(protein_segment->get_size(), protein_segment->get_size());
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig(covariance);
for (int i = 0; i < protein_segment->get_size(); i++) {
for (int j = 0; j < i; j++) {
Eigen::Vector3d kk = Eigen::Vector3d::Zero();
Eigen::Vector3d d = displacement_vector.segment<3>(3 * i)-displacement_vector.segment<3>(3 * j);
// Sum over non-trivial modes only; eigenvalues are sorted ascending, so
// the first 6 (or 5 for very small segments) are skipped -- presumably
// the near-zero rigid-body modes. TODO confirm the 6-vs-5 cutoff.
for(int k = (protein_segment->get_size() > 2 ? 6 : 5); k < protein_segment->get_size() * 3; k++) {
kk += Eigen::Vector3d(
eig.eigenvectors()(3 * i + 0, k) * eig.eigenvectors()(3 * j + 0, k),
eig.eigenvectors()(3 * i + 1, k) * eig.eigenvectors()(3 * j + 1, k),
eig.eigenvectors()(3 * i + 2, k) * eig.eigenvectors()(3 * j + 2, k))
/ eig.eigenvalues()[k];
}
// Lower triangle: force constant (8.31 * 0.001 presumably converts the
// gas constant R from J to kJ per mol -- verify units with callers).
kk_matrix(i, j) = -kk.sum() * 8.31 * temperature * 0.001;
// Upper triangle: norm of the displacement-amplitude difference.
kk_matrix(j, i) = d.norm();
}
}
protein_segment->add_force_constant(kk_matrix);
protein_segment->add_displacement_vector(displacement_vector);
protein_segment->add_mean_square_fluctuation(covariance);
}
LOGD << "Finished analyzing trajectory";
}
示例9: GetIndCEScoresCPP
// Conditional-expectation FPCA scores for a single subject.
//   yVec    - observed values
//   muVec   - mean function evaluated at the observation points
//   lamVec  - eigenvalues (lambda)
//   phiMat  - eigenfunctions evaluated at the observation points
//   SigmaYi - covariance of the observations for this subject
// Returns a list with:
//   xiEst   = Lambda Phi^T SigmaYi^{-1} (y - mu)
//   xiVar   = Lambda - Lambda Phi^T SigmaYi^{-1} Phi Lambda
//   fittedY = mu + Phi xiEst
// (The previous version pre-filled these with NaN and then unconditionally
// overwrote them; the results are now computed directly.)
Rcpp::List GetIndCEScoresCPP( const Eigen::Map<Eigen::VectorXd> & yVec, const Eigen::Map<Eigen::VectorXd> & muVec, const Eigen::Map<Eigen::VectorXd> & lamVec, const Eigen::Map<Eigen::MatrixXd> & phiMat, const Eigen::Map<Eigen::MatrixXd> & SigmaYi){
  // Lambda * Phi^T, shared by both the score estimate and its variance.
  Eigen::MatrixXd LamPhi = lamVec.asDiagonal() * phiMat.transpose();
  // Solve against SigmaYi via a robust LDLT factorization instead of an
  // explicit inverse.
  Eigen::LDLT<Eigen::MatrixXd> ldlt_SigmaYi(SigmaYi);
  Eigen::MatrixXd xiEst = LamPhi * ldlt_SigmaYi.solve(yVec - muVec);
  Eigen::MatrixXd xiVar = -LamPhi * ldlt_SigmaYi.solve(LamPhi.transpose());
  xiVar.diagonal() += lamVec;  // complete Lambda - LamPhi SigmaYi^{-1} LamPhi^T
  Eigen::MatrixXd fittedY = muVec + phiMat * xiEst;
  return Rcpp::List::create(Rcpp::Named("xiEst") = xiEst,
                            Rcpp::Named("xiVar") = xiVar,
                            Rcpp::Named("fittedY") = fittedY);
}
示例10: corEigen
// Correlation implementation in Eigen
//' @rdname corFamily
//' @export
// [[Rcpp::export]]
Eigen::MatrixXd corEigen(Eigen::Map<Eigen::MatrixXd> & X) {
  // Handle degenerate cases: no observations but some variables -> all-NA.
  if (X.rows() == 0 && X.cols() > 0) {
    return Eigen::MatrixXd::Constant(X.cols(), X.cols(),
                                     Rcpp::NumericVector::get_na());
  }
  // Computing degrees of freedom
  // n - 1 is the unbiased estimate whereas n is the MLE
  const int df = X.rows() - 1; // Subtract 1 by default
  // Center into a copy: X is an Eigen::Map over R-owned memory, so centering
  // it in place (as `X.rowwise() -= ...` would) silently mutates the caller's
  // matrix -- a visible side effect a correlation function must not have.
  Eigen::MatrixXd centered = X.rowwise() - X.colwise().mean();
  Eigen::MatrixXd cor = centered.transpose() * centered / df; // The covariance matrix
  // Get 1 over the standard deviations
  Eigen::VectorXd inv_sds = cor.diagonal().array().sqrt().inverse();
  // Scale the covariance matrix
  cor = cor.cwiseProduct(inv_sds * inv_sds.transpose());
  return cor;
}
示例11: run
//.........这里部分代码省略.........
input_header.size(3) >= 6 &&
input_header.size(3) == (int) Math::SH::NforL (Math::SH::LforN (input_header.size(3)))) {
CONSOLE ("SH series detected, performing apodised PSF reorientation");
fod_reorientation = true;
Eigen::MatrixXd directions_az_el;
opt = get_options ("directions");
if (opt.size())
directions_az_el = load_matrix (opt[0][0]);
else
directions_az_el = DWI::Directions::electrostatic_repulsion_300();
Math::SH::spherical2cartesian (directions_az_el, directions_cartesian);
// load with SH coeffients contiguous in RAM
stride = Stride::contiguous_along_axis (3, input_header);
}
// Modulate FODs
bool modulate = false;
if (get_options ("modulate").size()) {
modulate = true;
if (!fod_reorientation)
throw Exception ("modulation can only be performed with FOD reorientation");
}
// Rotate/Flip gradient directions if present
if (linear && input_header.ndim() == 4 && !warp && !fod_reorientation) {
try {
auto grad = DWI::get_DW_scheme (input_header);
if (input_header.size(3) == (ssize_t) grad.rows()) {
INFO ("DW gradients detected and will be reoriented");
Eigen::MatrixXd rotation = linear_transform.linear();
Eigen::MatrixXd test = rotation.transpose() * rotation;
test = test.array() / test.diagonal().mean();
if (!test.isIdentity (0.001))
WARN ("the input linear transform contains shear or anisotropic scaling and "
"therefore should not be used to reorient diffusion gradients");
if (replace)
rotation = linear_transform.linear() * input_header.transform().linear().inverse();
for (ssize_t n = 0; n < grad.rows(); ++n) {
Eigen::Vector3 grad_vector = grad.block<1,3>(n,0);
grad.block<1,3>(n,0) = rotation * grad_vector;
}
DWI::set_DW_scheme (output_header, grad);
}
}
catch (Exception& e) {
e.display (2);
}
}
// Interpolator
int interp = 2; // cubic
opt = get_options ("interp");
if (opt.size()) {
interp = opt[0][0];
if (!warp && !template_header)
WARN ("interpolator choice ignored since the input image will not be regridded");
}
// Out of bounds value
float out_of_bounds_value = 0.0;
opt = get_options ("nan");
if (opt.size()) {
out_of_bounds_value = NAN;
if (!warp && !template_header)
示例12: add_kr
void KRComputation::add_kr(KRAverager & averager, const Eigen::MatrixXd & force_constants, int offset){
    // By convention the sub-diagonal at -offset carries the force constants
    // and the super-diagonal at +offset the paired distances -- presumably
    // matching how kk_matrix is filled upstream; verify against the producer.
    Eigen::VectorXd k_values = force_constants.diagonal(-offset);
    Eigen::VectorXd r_values = force_constants.diagonal(offset);
    averager.add_force_constant_vector(k_values, r_values);
}
示例13: H
// Verify applySchurComplement against a dense reference computation:
// build a block-sparse Hessian H, marginalize the trailing blocks, and check
// that the returned lhs/rhs match the dense Schur complement
//   AA = U - W V^{-1} W^T (+ lambda on the diagonal),
//   bb = eps_dense - W V^{-1} eps_sparse.
// Also checks idempotence of a second call and the per-block update step.
TEST(SparseMatrixFunctionTests, testSchurComplement1)
{
try {
using namespace aslam::backend;
typedef sparse_block_matrix::SparseBlockMatrix<Eigen::MatrixXd> SparseBlockMatrix;
// Create the sparse Hessian. Two dense blocks. Three sparse.
int structure[5] = {2, 2, 3, 3, 3};
// partial_sum turns block sizes into cumulative block boundaries.
std::partial_sum(structure, structure + 5, structure);
int marginalizedStartingBlock = 2;
int marginalizedStartingIndex = structure[ marginalizedStartingBlock - 1 ];
double lambda = 1;
SparseBlockMatrix H(structure, structure, 5, 5, true);
Eigen::VectorXd e(H.rows());
e.setRandom();
Eigen::VectorXd b(H.rowBaseOfBlock(marginalizedStartingBlock));
b.setZero();
// A receives the Schur-complement lhs over the kept (non-marginalized) blocks.
boost::shared_ptr<SparseBlockMatrix> A(H.slice(0, marginalizedStartingBlock, 0, marginalizedStartingBlock, true));
ASSERT_EQ(marginalizedStartingBlock, A->bRows());
ASSERT_EQ(marginalizedStartingBlock, A->bCols());
A->clear(false);
std::vector<Eigen::MatrixXd> invVi;
invVi.resize(H.bRows() - marginalizedStartingBlock);
// Fill in H.
*H.block(0, 0, true) = sm::eigen::randomCovariance<2>() * 100;
*H.block(1, 1, true) = sm::eigen::randomCovariance<2>() * 100;
*H.block(2, 2, true) = sm::eigen::randomCovariance<3>() * 100;
*H.block(3, 3, true) = sm::eigen::randomCovariance<3>() * 100;
*H.block(4, 4, true) = sm::eigen::randomCovariance<3>() * 100;
// Start with two off diagonals.
H.block(0, 4, true)->setRandom();
H.block(0, 4, true)->array() *= 100;
H.block(1, 4, true)->setRandom();
H.block(1, 4, true)->array() *= 100;
//std::cout << "H:\n" << H << std::endl;
applySchurComplement(H,
e,
lambda,
marginalizedStartingBlock,
true,
*A,
invVi,
b);
// Dense reference: partition H into U (kept), V (marginalized), W (coupling).
Eigen::MatrixXd Hd = H.toDense();
Eigen::MatrixXd U = Hd.topLeftCorner(marginalizedStartingIndex, marginalizedStartingIndex);
Eigen::MatrixXd V = Hd.bottomRightCorner(H.rows() - marginalizedStartingIndex, H.rows() - marginalizedStartingIndex);
Eigen::MatrixXd W = Hd.topRightCorner(marginalizedStartingIndex, H.rows() - marginalizedStartingIndex);
// Levenberg-Marquardt-style damping on both diagonals.
V.diagonal().array() += lambda;
Eigen::MatrixXd AA = U - W * V.inverse() * W.transpose();
AA.diagonal().array() += lambda;
Eigen::VectorXd epsSparse = e.tail(e.size() - marginalizedStartingIndex);
Eigen::VectorXd epsDense = e.head(marginalizedStartingIndex);
Eigen::VectorXd bb = epsDense - W * V.inverse() * epsSparse;
{
SCOPED_TRACE("");
// A stores only the upper triangle; expand before comparing.
Eigen::MatrixXd Asa = A->toDense().selfadjointView<Eigen::Upper>();
sm::eigen::assertNear(Asa, AA, 1e-12, SM_SOURCE_FILE_POS, "Testing the lhs schur complement");
}
{
SCOPED_TRACE("");
sm::eigen::assertNear(b, bb, 1e-12, SM_SOURCE_FILE_POS, "Testing the rhs schur complement");
}
// Let's try it again to make sure stuff gets initialized correctly.
applySchurComplement(H,
e,
lambda,
marginalizedStartingBlock,
true,
*A,
invVi,
b);
{
SCOPED_TRACE("");
Eigen::MatrixXd Asa = A->toDense().selfadjointView<Eigen::Upper>();
sm::eigen::assertNear(Asa, AA, 1e-12, SM_SOURCE_FILE_POS, "Testing the lhs schur complement");
}
{
SCOPED_TRACE("");
sm::eigen::assertNear(b, bb, 1e-12, SM_SOURCE_FILE_POS, "Testing the rhs schur complement");
}
// Now we check the update function.
Eigen::VectorXd dx(marginalizedStartingIndex);
dx.setRandom();
// Dense reference back-substitution for the marginalized variables.
Eigen::VectorXd denseDs = V.inverse() * (epsSparse - W.transpose() * dx);
for (int i = 0; i < H.bRows() - marginalizedStartingBlock; ++i) {
Eigen::VectorXd outDsi;
buildDsi(i, H, e, marginalizedStartingBlock, invVi[i], dx, outDsi);
Eigen::VectorXd dsi = denseDs.segment(H.rowBaseOfBlock(i + marginalizedStartingBlock) - marginalizedStartingIndex, H.rowsOfBlock(i + marginalizedStartingBlock));
sm::eigen::assertNear(outDsi, dsi, 1e-12, SM_SOURCE_FILE_POS, "Checking the update step calculation");
}
} catch (const std::exception& e) {
FAIL() << "Exception: " << e.what();
}
}
示例14: rcppeigen_get_diag
//return the diagonal of the Cholesky factor of A with RcppEigen; summing its logs (and doubling) yields the log-determinant -- useful for dmvnorm
//[[Rcpp::export]]
Eigen::MatrixXd rcppeigen_get_diag(const Eigen::Map<Eigen::MatrixXd> & A){
    // Return the diagonal of the lower Cholesky factor L (A = L L^T) as an
    // n x 1 matrix; the caller can sum its logs (and double) to get log|A|.
    Eigen::MatrixXd chol_factor = A.llt().matrixL();
    return chol_factor.diagonal();
}
示例15: compute
// Closed-form absolute orientation: find the rotation quaternion that best
// aligns the `left` point set onto the `right` point set (least squares),
// via the 4x4 symmetric matrix N whose maximal-eigenvalue eigenvector is the
// optimal quaternion (Horn-style method).
//   left, right - corresponding 3D point pairs (must be the same length)
//   result      - output: the optimal rotation as a normalized quaternion
// NOTE(review): empty inputs divide by pairNum == 0 -- confirm callers
// guarantee at least one pair.
void AbsoluteOrientation::compute( std::vector<Eigen::Vector3d> &left, std::vector<Eigen::Vector3d> &right, Eigen::Quaterniond &result )
{
int i, pairNum = left.size();
Eigen::MatrixXd muLmuR = Eigen::MatrixXd::Zero(3,3), M = Eigen::MatrixXd::Zero(3,3),
curMat = Eigen::MatrixXd::Zero(3,3), N = Eigen::MatrixXd::Zero(4,4);
Eigen::Vector3d meanFirst(0,0,0), meanSecond(0,0,0); //assume points set to zero by constructor
//compute the mean of both point sets
for (i=0; i<pairNum; i++) {
meanFirst[0] += left[i][0]; meanFirst[1] += left[i][1]; meanFirst[2] += left[i][2];
meanSecond[0] += right[i][0]; meanSecond[1] += right[i][1]; meanSecond[2] += right[i][2];
}
meanFirst[0]/=pairNum; meanFirst[1]/=pairNum; meanFirst[2]/=pairNum;
meanSecond[0]/=pairNum; meanSecond[1]/=pairNum; meanSecond[2]/=pairNum;
//compute the matrix muLmuR (outer product of the two means)
muLmuR(0,0) = meanFirst[0]*meanSecond[0];
muLmuR(0,1) = meanFirst[0]*meanSecond[1];
muLmuR(0,2) = meanFirst[0]*meanSecond[2];
muLmuR(1,0) = meanFirst[1]*meanSecond[0];
muLmuR(1,1) = meanFirst[1]*meanSecond[1];
muLmuR(1,2) = meanFirst[1]*meanSecond[2];
muLmuR(2,0) = meanFirst[2]*meanSecond[0];
muLmuR(2,1) = meanFirst[2]*meanSecond[1];
muLmuR(2,2) = meanFirst[2]*meanSecond[2];
//compute the matrix M: sum of per-pair outer products, then subtract
//pairNum * (mean outer product) to obtain the centered cross-covariance
for (i=0; i<pairNum; i++) {
Eigen::Vector3d &leftPoint = left[i];
Eigen::Vector3d &rightPoint = right[i];
curMat(0,0) = leftPoint[0]*rightPoint[0];
curMat(0,1) = leftPoint[0]*rightPoint[1];
curMat(0,2) = leftPoint[0]*rightPoint[2];
curMat(1,0) = leftPoint[1]*rightPoint[0];
curMat(1,1) = leftPoint[1]*rightPoint[1];
curMat(1,2) = leftPoint[1]*rightPoint[2];
curMat(2,0) = leftPoint[2]*rightPoint[0];
curMat(2,1) = leftPoint[2]*rightPoint[1];
curMat(2,2) = leftPoint[2]*rightPoint[2];
M+=curMat;
}
M+= (muLmuR *(-pairNum));
//compute the matrix N: 4x4 symmetric matrix assembled from M's trace,
//its symmetric part, and the antisymmetric components A12/A20/A01
Eigen::MatrixXd tmpMat = Eigen::MatrixXd::Zero(3,3);
double A12, A20, A01;
double traceM = 0.0;
for(i=0; i<3; i++) traceM += M(i,i);
tmpMat.diagonal() = Eigen::VectorXd::Constant(3, -traceM); //tmpMat.fill_diagonal(-traceM);
tmpMat += (M + M.transpose());
A12 = M(1,2) - M(2,1);
A20 = M(2,0) - M(0,2);
A01 = M(0,1) - M(1,0);
N(0,0)=traceM; N(0,1)=A12; N(0,2)=A20; N(0,3)=A01;
N(1,0)=A12;
N(2,0)=A20;
N(3,0)=A01;
N.bottomRightCorner(3,3) = tmpMat; //N.update(tmpMat,1,1);
////find the eigenvector that belongs to the maximal
////eigenvalue of N, eigenvalues are sorted from smallest to largest
//vnl_symmetric_eigensystem<double> eigenSystem(N);
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
es.compute(N);
Eigen::MatrixXd V = es.eigenvectors();
//std::stringstream ss;ss << V;
//qDebug() << qPrintable(ss.str().c_str());
//setRotationQuaternion(eigenSystem.V(0,3),eigenSystem.V(1,3),eigenSystem.V(2,3),eigenSystem.V(3,3), true);
// SelfAdjointEigenSolver sorts eigenvalues ascending, so column 3 is the
// maximal-eigenvalue eigenvector; its components are taken as (w, x, y, z).
result = Eigen::Quaterniond( V(0,3),V(1,3),V(2,3),V(3,3) ).normalized();
}