

C++ VectorXd::squaredNorm Method Code Examples

This article collects typical usage examples of the C++ VectorXd::squaredNorm method, gathered from real open-source code. If you have been wondering what exactly VectorXd::squaredNorm does, or how to use it in practice, the curated examples below should help. You can also explore further usage examples of the VectorXd class it belongs to.


Nine code examples of the VectorXd::squaredNorm method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
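Before diving into the project code, here is a minimal self-contained sketch (our addition, not taken from any of the projects below) of what squaredNorm computes: the sum of squared coefficients, equivalent to v.dot(v), which skips the square root that norm() takes:

#include <iostream>
#include <Eigen/Dense>

using Eigen::VectorXd;

int main() {
    VectorXd v(3);
    v << 1.0, 2.0, 2.0;
    std::cout << v.squaredNorm() << std::endl; // 1 + 4 + 4 = 9
    std::cout << v.norm() << std::endl;        // sqrt(9) = 3
    return 0;
}

Because it avoids the sqrt, squaredNorm() is the usual choice for convergence tests and distance comparisons, as the examples below illustrate.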

Example 1: compute_dog_leg

VectorXd Optimizer::compute_dog_leg(double alpha, const VectorXd& h_sd,
    const VectorXd& h_gn, double delta, double& gain_ratio_denominator) {
  // Case 1: the Gauss-Newton step already lies inside the trust region.
  if (h_gn.norm() <= delta) {
    gain_ratio_denominator = current_SSE_at_linpoint;
    return h_gn;
  }

  double h_sd_norm = h_sd.norm();

  // Case 2: even the full steepest-descent step alpha*h_sd leaves the trust
  // region, so truncate the steepest-descent direction to the boundary.
  if ((alpha * h_sd_norm) >= delta) {
    gain_ratio_denominator = delta * (2 * alpha * h_sd_norm - delta)
        / (2 * alpha);
    return (delta / h_sd_norm) * h_sd;
  } else {
    // complicated case: calculate intersection of trust region with
    // line between Gauss-Newton and steepest descent solutions
    VectorXd a = alpha * h_sd;
    VectorXd b = h_gn;
    double c = a.dot(b - a);
    double b_a_norm2 = (b - a).squaredNorm();
    double a_norm2 = a.squaredNorm();
    double delta2 = delta * delta;
    double sqrt_term = sqrt(c * c + b_a_norm2 * (delta2 - a_norm2));
    double beta;
    if (c <= 0) {
      beta = (-c + sqrt_term) / b_a_norm2;
    } else {
      beta = (delta2 - a_norm2) / (c + sqrt_term);
    }

    gain_ratio_denominator = .5 * alpha * (1 - beta) * (1 - beta) * h_sd_norm
        * h_sd_norm + beta * (2 - beta) * current_SSE_at_linpoint;
    return (alpha * h_sd + beta * (h_gn - alpha * h_sd));
  }
}
Developer: Duckietown-NCTU, Project: Software, Lines: 35, Source: Optimizer.cpp
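The complicated case above is the standard Powell dog-leg interpolation (see, e.g., Madsen, Nielsen and Tingleff, "Methods for Non-Linear Least Squares Problems"). With $a = \alpha h_{sd}$ and $b = h_{gn}$, the blended step $a + \beta(b - a)$ must land exactly on the trust-region boundary, so $\beta$ solves

$$\|a + \beta (b - a)\|^2 = \Delta^2 \quad\Longleftrightarrow\quad \|b - a\|^2 \beta^2 + 2c\beta + (\|a\|^2 - \Delta^2) = 0, \qquad c = a^\top (b - a).$$

The root in $(0, 1)$ can be written in two algebraically equivalent ways,

$$\beta = \frac{-c + \sqrt{c^2 + \|b - a\|^2 (\Delta^2 - \|a\|^2)}}{\|b - a\|^2} = \frac{\Delta^2 - \|a\|^2}{c + \sqrt{c^2 + \|b - a\|^2 (\Delta^2 - \|a\|^2)}},$$

and the code picks whichever form avoids subtractive cancellation for the sign of c.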

Example 2: staticSolveNewtonsForces

void Simulation::staticSolveNewtonsForces(MatrixXd& TV, MatrixXi& TT, MatrixXd& B, VectorXd& fixed_forces, int ignorePastIndex){
	cout<<"----------------Static Solve Newtons, Fix Forces-------------"<<endl;
	//Newton's method static solve for the minimum-strain-energy configuration
	SparseMatrix<double> forceGradient;
	forceGradient.resize(3*TV.rows(), 3*TV.rows());
	SparseMatrix<double> forceGradientStaticBlock;
	forceGradientStaticBlock.resize(3*ignorePastIndex, 3*ignorePastIndex);
	VectorXd f, x;
	f.resize(3*TV.rows());
	f.setZero();
	x.resize(3*TV.rows());
	x.setZero();
	setTVtoX(x, TV);
	cout<<x<<endl; // debug: dump the initial state vector
	int NEWTON_MAX = 100, k=0;
	for(k=0; k<NEWTON_MAX; k++){
		xToTV(x, TV);

		calculateForceGradient(TV, forceGradient);
		calculateElasticForces(f, TV);
		// Overwrite f with any externally prescribed forces; these must lie
		// inside the free (non-fixed) block of degrees of freedom.
		for(int i=0; i<fixed_forces.rows(); i++){
			if(fabs(fixed_forces(i))>0.000001){
				if(i>=3*ignorePastIndex){
					cout<<"Problem Check simulation.cpp file"<<endl;
					cout<<ignorePastIndex<<endl;
					cout<<i<<" - "<<fixed_forces(i)<<endl;
					exit(0);
				}
				f(i) = fixed_forces(i);
			}
		}
		
		//Block forceGrad and f to exclude the fixed verts
		forceGradientStaticBlock = forceGradient.block(0,0, 3*(ignorePastIndex), 3*ignorePastIndex);
		VectorXd fblock = f.head(ignorePastIndex*3);

		//Sparse QR 
		// SparseQR<SparseMatrix<double>, COLAMDOrdering<int>> sqr;
		// sqr.compute(forceGradientStaticBlock);
		// VectorXd deltaX = -1*sqr.solve(fblock);

		// Conj Grad
		ConjugateGradient<SparseMatrix<double>> cg;
		cg.compute(forceGradientStaticBlock);
		VectorXd deltaX = -1*cg.solve(fblock);
		// For an iterative solver, info() reports convergence only after solve(),
		// so check it after computing deltaX rather than after compute().
		if(cg.info() != Eigen::Success){
			cout<<"ConjugateGradient numerical issue"<<endl;
			exit(0);
		}

		// // Sparse Cholesky LL^T
		// SimplicialLLT<SparseMatrix<double>> llt;
		// llt.compute(forceGradientStaticBlock);
		// if(llt.info() == Eigen::NumericalIssue){
		// 	cout<<"Possibly using a non- pos def matrix in the LLT method"<<endl;
		// 	exit(0);
		// }
		// VectorXd deltaX = -1*llt.solve(fblock);

		x.segment(0,3*(ignorePastIndex))+=deltaX;
		cout<<"		Newton Iter "<<k<<endl;

		if(x != x){ // componentwise self-comparison: true only if some entry is NaN
			cout<<"NAN"<<endl;
			exit(0);
		}
		cout<<"fblock square norm"<<endl;
		cout<<fblock.squaredNorm()/fblock.size()<<endl;
		double strainE = 0;
		for(int i=0; i< M.tets.size(); i++){
			strainE += M.tets[i].undeformedVol*M.tets[i].energyDensity;
		}
		cout<<strainE<<endl;
		if (fblock.squaredNorm()/fblock.size() < 0.00001){
			break;
		}

	}
	if(k== NEWTON_MAX){
		cout<<"ERROR Static Solve: Newton max reached"<<endl;
		cout<<k<<endl;
		exit(0);
	}
	double strainE = 0;
	for(int i=0; i< M.tets.size(); i++){
		strainE += M.tets[i].undeformedVol*M.tets[i].energyDensity;
	}
	cout<<"strain E"<<strainE<<endl;
	cout<<"x[0] "<<x(0)<<endl;
	cout<<"x[1] "<<x(1)<<endl;
	exit(0); // debug early exit: the closing output below is currently unreachable

	cout<<"-------------------"<<endl;
}
Developer: itsvismay, Project: ElasticBodies, Lines: 94, Source: simulation.cpp
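The solve inside the loop is a Newton step on the force-balance condition $f(x) = 0$: with $K = \partial f / \partial x$ the force gradient restricted to the free block, each iteration solves

$$K\,\delta x = -f$$

and the loop terminates once the mean squared residual $\|f\|^2 / n$ drops below 1e-5. The commented-out SparseQR and SimplicialLLT blocks are interchangeable linear solvers for the same system.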

Example 3: swigEigenTest

double swigEigenTest(double* X, int m, int n) {
	auto A = eigenWrap2D_nocopy(X, m, n);
	VectorXd rowSums = A.rowwise().sum();
	return rowSums.squaredNorm();
}
Developer: dblalock, Project: dig, Lines: 5, Source: tree.cpp
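eigenWrap2D_nocopy is a project helper whose definition is not shown on this page. A plausible reconstruction, an assumption on our part rather than the dig repository's actual code, is a zero-copy Eigen::Map over the raw buffer that SWIG hands in (row-major to match a NumPy array's default layout):

#include <Eigen/Dense>

using RowMajorMatrixXd =
    Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

// Hypothetical sketch: wrap a raw m-by-n buffer without copying it.
static Eigen::Map<RowMajorMatrixXd> eigenWrap2D_nocopy(double* X, int m, int n) {
    return Eigen::Map<RowMajorMatrixXd>(X, m, n);
}

With such a Map, A.rowwise().sum() and squaredNorm() operate directly on the caller-owned memory, so no copies are made anywhere in swigEigenTest.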

Example 4: squaredL2Dist

double squaredL2Dist(const VectorXd& x, const VectorXd& y) {
	VectorXd diff = x - y;
	return diff.squaredNorm();
}
Developer: dblalock, Project: dig, Lines: 4, Source: tree.cpp
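A usage note: diff.squaredNorm() is equivalent to (x - y).dot(x - y) but reads more clearly. For a single pair this is the natural formulation; when many pairwise distances are needed, the expansion

$$\|x - y\|^2 = \|x\|^2 - 2\,x^\top y + \|y\|^2$$

lets the cross terms be computed for all pairs at once as one matrix product, the usual trick for building full distance matrices.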

Example 5: powells_dog_leg

void Optimizer::powells_dog_leg(int* num_iterations, double delta0,
    int max_iterations, double epsilon1, double epsilon2, double epsilon3) {
  // Batch optimization
  int num_iter = 0;
  // current estimate is used as new linearization point
  function_system.estimate_to_linpoint();

  double delta = delta0;
  SparseSystem jacobian(1, 1);
  VectorXd f_x;
  VectorXd grad;

  bool found = powells_dog_leg_update(epsilon1, epsilon3, jacobian, f_x, grad);

  double rho_denominator;

  while ((not found) && (max_iterations == 0 || num_iter < max_iterations)) {
    num_iter++;
    cout << "PDL Iteration " << num_iter << " residual: " << f_x.squaredNorm()
        << endl;
    // compute alpha
    double alpha = grad.squaredNorm() / (jacobian * grad).squaredNorm();
    // steepest descent
    VectorXd h_sd = -grad;
    // solve Gauss Newton
    VectorXd h_gn = compute_gauss_newton_step(jacobian, &function_system._R);
    // compute dog leg h_dl
    // x0 = x: remember (and return) linearization point of R
    function_system.linpoint_to_estimate();
    VectorXd h_dl = compute_dog_leg(alpha, h_sd, h_gn, delta, rho_denominator);
    // Evaluate new solution, update estimate and trust region.
    if (h_dl.norm() <= epsilon2) {
      found = true;
    } else {
      // new estimate
      // change linearization point directly (original LP saved in estimate)
      function_system.self_exmap(h_dl);
      // calculate gain ratio rho
      VectorXd f_x_new = function_system.weighted_errors(LINPOINT);
      double rho = (f_x.squaredNorm() - f_x_new.squaredNorm())
          / (rho_denominator);
      if (rho > 0) {
        // accept new estimate
        cout << "accepted" << endl;
        f_x = f_x_new;
        found = powells_dog_leg_update(epsilon1, epsilon3, jacobian, f_x, grad);
      } else {
        // reject new estimate, overwrite with last saved one
        cout << "rejected" << endl;
        function_system.estimate_to_linpoint();
      }
      if (rho > 0.75) {
        delta = max(delta, 3.0 * h_dl.norm());
      } else if (rho < 0.25) {
        delta *= 0.5;
        found = (delta <= epsilon2);
      }
    }
  }
  if (num_iterations) {
    *num_iterations = num_iter;
  }
  // Overwrite potentially rejected linearization point with last saved one
  // (could be identical if it was accepted in the last iteration).

  function_system.swap_estimates();
}
Developer: Duckietown-NCTU, Project: Software, Lines: 68, Source: Optimizer.cpp
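The acceptance test in the loop above is the standard trust-region gain ratio

$$\rho = \frac{F(x) - F(x_{\mathrm{new}})}{L(0) - L(h_{dl})},$$

the ratio of the actual reduction in the sum of squared errors to the reduction predicted by the linear model; the denominator is exactly the value that compute_dog_leg stored in rho_denominator for whichever of its three cases produced the step. Steps with $\rho > 0$ are accepted; $\rho > 0.75$ grows the trust-region radius to at least $3\|h_{dl}\|$, and $\rho < 0.25$ halves it.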

Example 6: levenberg_marquardt

void Optimizer::levenberg_marquardt(const Properties& prop,
    int* num_iterations) {
  int num_iter = 0;
  double lambda = prop.lm_lambda0;
  // Using linpoint as current estimate below.
  function_system.estimate_to_linpoint();

  // Get the current Jacobian at the linearization point.
  SparseSystem jacobian = function_system.jacobian();

  // Get the error residual vector at the current linearization point.
  VectorXd r = function_system.weighted_errors(LINPOINT);

  // Record the absolute sum-of-squares error (i.e., objective function value) here.
  double error = r.squaredNorm();

#ifdef USE_PDL_STOPPING_CRITERIA
  // Compute the gradient direction vector at the current linearization point
  VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
#endif

  double error_diff, error_new;

  // Solve the damped system (J'J + lambda*diag(J'J)) * delta = -J'r.
  VectorXd delta = compute_gauss_newton_step(jacobian, &function_system._R,
      lambda);

  while (
  // We ALWAYS use these stopping criteria
  ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
      && (delta.norm() > prop.epsilon2)

#ifdef USE_PDL_STOPPING_CRITERIA
      && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
      && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)
#else
      && (error > prop.epsilon_abs)
#endif
  )  // end while conditional
  {
    num_iter++;

    // remember the last accepted linearization point
    function_system.linpoint_to_estimate();
    // Apply the delta vector DIRECTLY TO THE LINEARIZATION POINT!
    function_system.self_exmap(delta);
    error_new = function_system.weighted_errors(LINPOINT).squaredNorm();
    error_diff = error - error_new;
    // feedback
    if (!prop.quiet) {
      cout << "LM Iteration " << num_iter << ": (lambda=" << lambda << ") ";
      if (error_diff > 0.) {
        cout << "residual: " << error_new << endl;
      } else {
        cout << "rejected" << endl;
      }
    }
    // decide if acceptable
    if (error_diff > 0.) {

#ifndef USE_PDL_STOPPING_CRITERIA
      if (error_diff < prop.epsilon_rel * error) {
        break;
      }
#endif

      // Update lambda
      lambda /= prop.lm_lambda_factor;

      // Record the error at the newly-accepted estimate.
      error = error_new;

      // Relinearize around the newly-accepted estimate.
      jacobian = function_system.jacobian();

#ifdef USE_PDL_STOPPING_CRITERIA
      r = function_system.weighted_errors(LINPOINT);
      g = mul_SparseMatrixTrans_Vector(jacobian, r);
#endif
    } else {
      // reject new estimate
      lambda *= prop.lm_lambda_factor;
      // restore previous estimate
      function_system.estimate_to_linpoint();
    }

    // Compute the step for the next iteration.
    delta = compute_gauss_newton_step(jacobian, &function_system._R, lambda);

  } // end while

  if (num_iterations != NULL) {
    *num_iterations = num_iter;
  }
  // Copy current estimate contained in linpoint.
  function_system.linpoint_to_estimate();
}
Developer: Duckietown-NCTU, Project: Software, Lines: 97, Source: Optimizer.cpp
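Assuming compute_gauss_newton_step applies Marquardt-style scaling when called with a lambda argument, as the comment in the code suggests, each iteration solves the damped normal equations

$$\big(J^\top J + \lambda\,\mathrm{diag}(J^\top J)\big)\,\delta = -J^\top r.$$

Dividing lambda by lm_lambda_factor after an accepted step moves the method toward a pure Gauss-Newton step; multiplying after a rejection shortens the step and rotates it toward scaled steepest descent.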

Example 7: gauss_newton

void Optimizer::gauss_newton(const Properties& prop, int* num_iterations) {
  // Batch optimization
  int num_iter = 0;

  // Set the new linearization point to be the current estimate.
  function_system.estimate_to_linpoint();
  // Compute Jacobian about current estimate.
  SparseSystem jacobian = function_system.jacobian();

  // Get the current error residual vector
  VectorXd r = function_system.weighted_errors(LINPOINT);

#ifdef USE_PDL_STOPPING_CRITERIA
  // Compute the current gradient direction vector
  VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
#else
  double error = r.squaredNorm();
  double error_new;
  // We haven't computed a step yet, so this initialization is to ensure
  // that we never skip over the while loop as a result of failing
  // change-in-error check.
  double error_diff = prop.epsilon_rel * error + 1;
#endif

  // Compute Gauss-Newton step h_{gn} to get to the next estimated optimizing point.
  VectorXd delta = compute_gauss_newton_step(jacobian);

  while (
  // We ALWAYS use these criteria
  ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
      && (delta.norm() > prop.epsilon2)

#ifdef USE_PDL_STOPPING_CRITERIA
      && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
      && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)

#else // Custom stopping criteria for GN
      && (error > prop.epsilon_abs)
      && (fabs(error_diff) > prop.epsilon_rel * error)
#endif

  ) // end while conditional
  {
    num_iter++;

    // Apply the Gauss-Newton step h_{gn} to...
    function_system.apply_exmap(delta);
    // ...set the new linearization point to be the current estimate.
    function_system.estimate_to_linpoint();
    // Relinearize about the new current estimate.
    jacobian = function_system.jacobian();
    // Compute the error residual vector at the new estimate.
    r = function_system.weighted_errors(LINPOINT);

#ifdef USE_PDL_STOPPING_CRITERIA
    g = mul_SparseMatrixTrans_Vector(jacobian, r);
#else
    // Update the error difference in errors between the previous and
    // current estimates.
    error_new = r.squaredNorm();
    error_diff = error - error_new;
    error = error_new;  // Record the absolute error at the current estimate
#endif

    // Compute Gauss-Newton step h_{gn} to get to the next estimated
    // optimizing point.
    delta = compute_gauss_newton_step(jacobian);
    if (!prop.quiet) {
      cout << "Iteration " << num_iter << ": residual ";

#ifdef USE_PDL_STOPPING_CRITERIA
      cout << r.squaredNorm();
#else
      cout << error;
#endif
      cout << endl;
    }
  }  //end while

  if (num_iterations != NULL) {
    *num_iterations = num_iter;
  }
  _cholesky->get_R(function_system._R);
}
Developer: Duckietown-NCTU, Project: Software, Lines: 84, Source: Optimizer.cpp
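With no damping term, the step computed here is the plain Gauss-Newton system

$$J^\top J\,\delta = -J^\top r,$$

solved through the triangular factor R that the final get_R call exports (produced by the _cholesky member, so presumably $R^\top R = J^\top J$). Each iteration relinearizes, so convergence is fast near a well-conditioned minimum but unguarded far from it, which is what the trust-region variants above address.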

Example 8: renderNewtonsMethod

void ImplicitNewmark::renderNewtonsMethod(){
	//Implicit Code
	v_k.setZero();
	x_k.setZero();
	x_k = x_old;
	v_k = v_old;
	VectorXd f_old = f;

	forceGradient.setZero();
	bool Nan=false;
	int NEWTON_MAX = 100, i = 0;
	double gamma = 0.5;
	double beta = 0.25;

	// cout<<"--------"<<simTime<<"-------"<<endl;
	// cout<<"x_k"<<endl;
	// cout<<x_k<<endl<<endl;
	// cout<<"v_k"<<endl;
	// cout<<v_k<<endl<<endl;
	// cout<<"--------------------"<<endl;
	for( i=0; i<NEWTON_MAX; i++){
		grad_g.setZero();
	
		NewmarkXtoTV(x_k, TVk);//TVk value changed in function
		NewmarkCalculateElasticForceGradient(TVk, forceGradient); 
		NewmarkCalculateForces(TVk, forceGradient, x_k, f);

		VectorXd g = x_k - x_old - h*v_old - (h*h/2)*(1-2*beta)*InvMass*f_old - (h*h*beta)*InvMass*f;
		grad_g = Ident - h*h*beta*InvMass*(forceGradient+(rayleighCoeff/h)*forceGradient);

		// VectorXd g = RegMass*x_k - RegMass*x_old - RegMass*h*v_old - (h*h/2)*(1-2*beta)*f_old - (h*h*beta)*f;
		// grad_g = RegMass - h*h*beta*(forceGradient+(rayleighCoeff/h)*forceGradient);
	
		// cout<<"G"<<t<<endl;
		// cout<<g<<endl<<endl;
		// cout<<"G Gradient"<<t<<endl;
		// cout<<grad_g<<endl;

		//solve for delta x
		// Conj Grad
		// ConjugateGradient<SparseMatrix<double>> cg;
		// cg.compute(grad_g);
		// VectorXd deltaX = -1*cg.solve(g);

		// Sparse Cholesky LL^T
		// SimplicialLLT<SparseMatrix<double>> llt;
		// llt.compute(grad_g);
		// VectorXd deltaX = -1* llt.solve(g);

		//Sparse QR 
		SparseQR<SparseMatrix<double>, COLAMDOrdering<int>> sqr;
		sqr.compute(grad_g);
		VectorXd deltaX = -1*sqr.solve(g);

		// CholmodSimplicialLLT<SparseMatrix<double>> cholmodllt;
		// cholmodllt.compute(grad_g);
		// VectorXd deltaX = -cholmodllt.solve(g);
		

		x_k+=deltaX;
		if(x_k != x_k){
			Nan = true;
			break;
		}
		if(g.squaredNorm()<.00000001){ // converged: Newmark residual is negligible
			break;
		}
	}
	if(Nan){
		cout<<"ERROR NEWMARK: Newton's method doesn't converge"<<endl;
		cout<<i<<endl;
		exit(0);
	}
	if(i== NEWTON_MAX){
		cout<<"ERROR NEWMARK: Newton max reached"<<endl;
		cout<<i<<endl;
		exit(0);
	}
	v_old = v_old + h*(1-gamma)*InvMass*f_old + h*gamma*InvMass*f;
	x_old = x_k;
}
Developer: itsvismay, Project: ElasticBodies, Lines: 81, Source: ImplicitNewmark.cpp
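The residual g and the final velocity line implement the Newmark-beta update; with $\gamma = 1/2$, $\beta = 1/4$ this is the unconditionally stable average-acceleration (trapezoidal) variant:

$$x_k = x_{\mathrm{old}} + h\,v_{\mathrm{old}} + \tfrac{h^2}{2}(1 - 2\beta)\,M^{-1} f_{\mathrm{old}} + h^2 \beta\,M^{-1} f(x_k),$$
$$v = v_{\mathrm{old}} + h(1 - \gamma)\,M^{-1} f_{\mathrm{old}} + h\gamma\,M^{-1} f,$$

so g is the position equation rearranged to zero, and grad_g is its Jacobian $I - h^2\beta\,M^{-1}\big(K + \tfrac{r}{h} K\big)$, with K the elastic force gradient and r the Rayleigh damping coefficient.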

Example 9: fastLasso


//......... part of the code omitted here .........
  	while(k < maxActive) {

  		// extract current correlations of inactive variables
  		VectorXd corInactiveY(m);
  		for(int j = 0; j < m; j++) {
  			corInactiveY(j) = corY(inactive(j));
  		}
  		// compute absolute values of correlations and find maximum
  		VectorXd absCorInactiveY = corInactiveY.cwiseAbs();
  		double maxCor = absCorInactiveY.maxCoeff();
  		// update current lambda
  		if(k == 0) {	// no active variables
  			previousLambda = maxCor;
  		} else {
  			previousLambda = currentLambda;
  		}
  		currentLambda = maxCor;
  		if(currentLambda <= rescaledLambda) break;

  		if(drops.size() == 0) {
  			// new active variables
  			VectorXi newActive = findNewActive(absCorInactiveY, maxCor - eps);
  			// do calculations for new active variables
  			for(int j = 0; j < newActive.size(); j++) {
  				// update Cholesky L of Gram matrix of active variables
  				// TODO: put this into void function
  				int newJ = inactive(newActive(j));
  				VectorXd xNewJ;
  				double newX;
  				if(useGram) {
  					newX = Gram(newJ, newJ);
  				} else {
  					xNewJ = xs.col(newJ);
  					newX = xNewJ.squaredNorm();
  				}
  				double normNewX = sqrt(newX);
  				if(k == 0) {	// no active variables, L is empty
  					L.resize(1,1);
  					L(0, 0) = normNewX;
  					rank = 1;
  				} else {
  					VectorXd oldX(k);
  					if(useGram) {
  						for(int j = 0; j < k; j++) {
  							oldX(j) = Gram(active(j), newJ);
  						}
  					} else {
  						for(int j = 0; j < k; j++) {
  							oldX(j) = xNewJ.dot(xs.col(active(j)));
  						}
  					}
  					VectorXd l = L.triangularView<Lower>().solve(oldX);
  					double lkk = newX - l.squaredNorm();
  					// check if L is machine singular
  					if(lkk > eps) {
  						// no singularity: update Cholesky L
  						lkk = sqrt(lkk);
  						rank++;
  						// add new row and column to Cholesky L
  						// this is a little trick: sometimes we need
  						// lower triangular matrix, sometimes upper
  						// hence we define quadratic matrix and use
  						// triangularView() to interpret matrix the
  						// correct way
  						L.conservativeResize(k+1, k+1);
  						for(int j = 0; j < k; j++) {
Developer: aalfons, Project: sparseLTSEigen, Lines: 67, Source: fastLasso.cpp
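The block above is the standard extension of a Cholesky factor by one variable: given the new diagonal Gram entry $x_{\mathrm{new}}$ (Gram(newJ, newJ), or xNewJ.squaredNorm() when no Gram matrix is precomputed) and the cross terms oldX against the active set, forward-substitute $L\,l = x_{\mathrm{old}}$ (the triangularView solve), then set the new diagonal entry

$$l_{kk} = \sqrt{x_{\mathrm{new}} - \|l\|^2}.$$

If $x_{\mathrm{new}} - \|l\|^2 \le \varepsilon$, the extended Gram matrix is numerically singular; the else branch, omitted here, presumably treats the new variable as linearly dependent and skips it.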


Note: The VectorXd::squaredNorm method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in each fragment remains with the original author, and distribution and use are governed by the corresponding project's license. Do not reproduce without permission.